diff --git a/.circleci/config.yml b/.circleci/config.yml
index 2036eac612597..cc2b4b018970d 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -15,7 +15,7 @@ experimental:
 templates:
   job_template: &job_template
     docker:
-      - image: gcr.io/datadoghq/agent-circleci-runner:v41089396-d65d75ec
+      - image: gcr.io/datadoghq/agent-circleci-runner:v45605580-5f42ffba
     environment:
       USE_SYSTEM_LIBS: "1"
     working_directory: /go/src/github.com/DataDog/datadog-agent
diff --git a/.ddqa/config.toml b/.ddqa/config.toml
index 02913ac890130..e24656da4b464 100644
--- a/.ddqa/config.toml
+++ b/.ddqa/config.toml
@@ -78,6 +78,7 @@ jira_issue_type = "Task"
 jira_statuses = ["To Do", "In Progress", "Done"]
 github_team = "opentelemetry"
 github_labels = ["team/opentelemetry"]
+exclude_members = ["ancostas", "Maascamp"]

 [teams."eBPF Platform"]
 jira_project = "EBPF"
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 15af633890318..4b9cea9f29568 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -47,12 +47,12 @@
 /.circleci/ @DataDog/agent-devx-infra
 /.github/CODEOWNERS # do not notify anyone
-/.github/*_TEMPLATE.md @DataDog/agent-all
+/.github/*_TEMPLATE.md @DataDog/agent-devx-loops
 /.github/dependabot.yaml @DataDog/agent-devx-infra
-/.github/workflows/serverless-benchmarks.yml @DataDog/serverless
-/.github/workflows/serverless-binary-size.yml @DataDog/serverless
-/.github/workflows/serverless-integration.yml @DataDog/serverless
-/.github/workflows/serverless-vuln-scan.yml @DataDog/serverless
+/.github/workflows/serverless-benchmarks.yml @DataDog/serverless @Datadog/serverless-aws
+/.github/workflows/serverless-binary-size.yml @DataDog/serverless @Datadog/serverless-aws
+/.github/workflows/serverless-integration.yml @DataDog/serverless @Datadog/serverless-aws
+/.github/workflows/serverless-vuln-scan.yml @DataDog/serverless @Datadog/serverless-aws
 /.github/workflows/windows-*.yml @DataDog/windows-agent
 /.github/workflows/cws-btfhub-sync.yml @DataDog/agent-security
 /.github/workflows/gohai.yml @DataDog/agent-shared-components
@@ -72,7 +72,7 @@
 /.gitlab/binary_build/cluster_agent.yml @DataDog/container-integrations @DataDog/agent-delivery
 /.gitlab/binary_build/fakeintake.yml @DataDog/agent-devx-loops
 /.gitlab/binary_build/otel_agent.yml @DataDog/opentelemetry @DataDog/agent-delivery
-/.gitlab/binary_build/serverless.yml @DataDog/serverless @DataDog/agent-delivery
+/.gitlab/binary_build/serverless.yml @DataDog/serverless @Datadog/serverless-aws @DataDog/agent-delivery
 /.gitlab/binary_build/system_probe.yml @DataDog/ebpf-platform @DataDog/agent-delivery
 /.gitlab/binary_build/windows.yml @DataDog/agent-delivery @DataDog/windows-agent
@@ -98,18 +98,19 @@
 /.gitlab/common/test_infra_version.yml @DataDog/agent-devx-loops @DataDog/agent-devx-infra
 /.gitlab/e2e/e2e.yml @DataDog/container-integrations @DataDog/agent-devx-loops
-/.gitlab/e2e/k8s_e2e.yml @DataDog/container-integrations @DataDog/agent-devx-loops
 /.gitlab/e2e/install_packages @DataDog/agent-delivery
 /.gitlab/container_build/fakeintake.yml @DataDog/agent-e2e-testing @DataDog/agent-devx-loops
 /.gitlab/binary_build/fakeintake.yml @DataDog/agent-e2e-testing @DataDog/agent-devx-loops
 /.gitlab/functional_test/security_agent.yml @DataDog/agent-security @DataDog/agent-devx-infra
-/.gitlab/functional_test/serverless.yml @DataDog/serverless @DataDog/agent-devx-infra
+/.gitlab/functional_test/serverless.yml @DataDog/serverless @Datadog/serverless-aws @DataDog/agent-devx-infra
 /.gitlab/functional_test_cleanup.yml @DataDog/agent-security @DataDog/windows-kernel-integrations @DataDog/agent-devx-infra
 /.gitlab/functional_test/system_probe_windows.yml @DataDog/agent-devx-infra @DataDog/windows-kernel-integrations
 /.gitlab/functional_test/common.yml @DataDog/agent-devx-infra @DataDog/windows-kernel-integrations
 /.gitlab/functional_test/oracle.yml @DataDog/agent-devx-infra @DataDog/database-monitoring
+/.gitlab/powershell_script_deploy/powershell_script_deploy.yml @DataDog/agent-delivery @DataDog/windows-agent
+/.gitlab/powershell_script_deploy/powershell_script_signing.yml @DataDog/agent-delivery @DataDog/windows-agent
 /.gitlab/choco_build/choco_build.yml @DataDog/agent-delivery @DataDog/windows-agent
 /.gitlab/choco_deploy/choco_deploy.yml @DataDog/agent-delivery @DataDog/windows-agent
@@ -188,7 +189,7 @@
 /cmd/dogstatsd/ @DataDog/agent-metrics-logs
 /cmd/otel-agent/ @DataDog/opentelemetry
 /cmd/process-agent/ @DataDog/processes
-/cmd/serverless/ @DataDog/serverless
+/cmd/serverless/ @DataDog/serverless @Datadog/serverless-aws
 /cmd/serverless/dependencies*.txt @DataDog/serverless @DataDog/agent-shared-components
 /cmd/serverless-init/ @DataDog/serverless
 /cmd/system-probe/ @DataDog/ebpf-platform
@@ -205,6 +206,7 @@
 /cmd/system-probe/modules/service_discover* @DataDog/apm-onboarding @DataDog/universal-service-monitoring
 /cmd/system-probe/modules/language_detection* @DataDog/processes @DataDog/universal-service-monitoring
 /cmd/system-probe/runtime/ @DataDog/agent-security
+/cmd/system-probe/modules/dynamic_instrumentation* @DataDog/debugger
 /cmd/system-probe/windows/ @DataDog/windows-kernel-integrations
 /cmd/system-probe/windows_resources/ @DataDog/windows-kernel-integrations
 /cmd/system-probe/main_windows*.go @DataDog/windows-kernel-integrations
@@ -310,7 +312,7 @@
 /pkg/metrics/metricsource.go @DataDog/agent-metrics-logs @DataDog/agent-integrations
 /pkg/serializer/ @DataDog/agent-processing-and-routing
 /pkg/serializer/internal/metrics/origin_mapping.go @DataDog/agent-processing-and-routing @DataDog/agent-integrations
-/pkg/serverless/ @DataDog/serverless
+/pkg/serverless/ @DataDog/serverless @Datadog/serverless-aws
 /pkg/serverless/appsec/ @DataDog/asm-go
 /pkg/status/ @DataDog/agent-shared-components
 /pkg/status/templates/trace-agent.tmpl @DataDog/agent-apm
@@ -324,7 +326,7 @@
 /pkg/trace/api/otlp*.go @DataDog/opentelemetry
 /pkg/trace/traceutil/otel*.go @DataDog/opentelemetry
 /pkg/trace/stats/ @DataDog/agent-apm @DataDog/opentelemetry
-/pkg/trace/telemetry/ @DataDog/telemetry-and-analytics
+/pkg/trace/telemetry/ @DataDog/apm-trace-storage
 /comp/core/autodiscovery/listeners/ @DataDog/container-platform
 /comp/core/autodiscovery/listeners/cloudfoundry*.go @DataDog/platform-integrations
 /comp/core/autodiscovery/listeners/snmp*.go @DataDog/network-device-monitoring
@@ -339,7 +341,7 @@
 /pkg/clusteragent/admission/mutate/autoscaling @DataDog/container-integrations
 /pkg/clusteragent/admission/mutate/autoinstrumentation/ @DataDog/container-platform @DataDog/apm-onboarding
 /pkg/clusteragent/orchestrator/ @DataDog/container-app
-/pkg/clusteragent/telemetry/ @DataDog/telemetry-and-analytics
+/pkg/clusteragent/telemetry/ @DataDog/apm-trace-storage
 /pkg/collector/ @DataDog/agent-metrics-logs
 /pkg/collector/corechecks/cluster/ @DataDog/container-integrations
 /pkg/collector/corechecks/cluster/orchestrator @DataDog/container-app
@@ -350,9 +352,9 @@
 /pkg/collector/corechecks/ebpf/ebpf* @DataDog/ebpf-platform
 /pkg/collector/corechecks/ebpf/probe/ebpfcheck/ @DataDog/ebpf-platform
 /pkg/collector/corechecks/ebpf/c/runtime/ebpf* @DataDog/ebpf-platform
-/pkg/collector/corechecks/embed/ @Datadog/agent-devx-infra -/pkg/collector/corechecks/embed/apm/ @Datadog/agent-devx-infra @DataDog/agent-apm -/pkg/collector/corechecks/embed/process/ @Datadog/agent-devx-infra @DataDog/processes +/pkg/collector/corechecks/embed/ @Datadog/agent-delivery +/pkg/collector/corechecks/embed/apm/ @DataDog/agent-apm +/pkg/collector/corechecks/embed/process/ @DataDog/processes /pkg/collector/corechecks/network-devices/ @DataDog/network-device-monitoring /pkg/collector/corechecks/orchestrator/ @DataDog/container-app /pkg/collector/corechecks/kubernetes/ @DataDog/container-integrations @@ -398,7 +400,7 @@ /pkg/flare/*_windows_test.go @Datadog/windows-agent /pkg/fleet/ @DataDog/fleet @DataDog/windows-agent /pkg/otlp/ @DataDog/opentelemetry -/pkg/otlp/*_serverless*.go @DataDog/serverless +/pkg/otlp/*_serverless*.go @DataDog/serverless @Datadog/serverless-aws /pkg/otlp/*_not_serverless*.go @DataDog/opentelemetry /pkg/pidfile/ @DataDog/agent-shared-components /pkg/persistentcache/ @DataDog/agent-metrics-logs @@ -425,6 +427,7 @@ /pkg/util/orchestrator/ @DataDog/container-app /pkg/util/podman/ @DataDog/container-integrations /pkg/util/prometheus @DataDog/container-integrations +/pkg/util/tagger @DataDog/container-platform /pkg/util/trivy/ @DataDog/container-integrations @DataDog/agent-security /pkg/util/uuid/ @DataDog/agent-shared-components /pkg/util/cgroups/ @DataDog/container-integrations @@ -457,6 +460,9 @@ /pkg/process/metadata/parser/*windows* @DataDog/universal-service-monitoring @DataDog/processes @DataDog/Networks @DataDog/windows-kernel-integrations /pkg/process/monitor/ @DataDog/universal-service-monitoring /pkg/process/net/ @DataDog/universal-service-monitoring @DataDog/processes @DataDog/Networks +/pkg/process/net/common_windows.go @DataDog/windows-agent +/pkg/process/net/windows_pipe.go @DataDog/windows-kernel-integrations +/pkg/process/net/windows_pipe_testutil.go @DataDog/windows-kernel-integrations /pkg/proto/datadog/remoteconfig/ @DataDog/remote-config /pkg/proto/pbgo/ # do not notify anyone /pkg/proto/pbgo/trace @DataDog/agent-apm @@ -563,13 +569,9 @@ /test/ @DataDog/agent-devx-loops /test/benchmarks/ @DataDog/agent-metrics-logs /test/benchmarks/kubernetes_state/ @DataDog/container-integrations -/test/e2e/ @DataDog/container-integrations @DataDog/agent-security -/test/e2e/cws-tests/ @DataDog/agent-security -/test/e2e/argo-workflows/otlp-workflow.yaml @DataDog/opentelemetry -/test/e2e/containers/otlp_sender/ @DataDog/opentelemetry /test/integration/ @DataDog/container-integrations -/test/integration/serverless @DataDog/serverless -/test/integration/serverless_perf @DataDog/serverless +/test/integration/serverless @DataDog/serverless @Datadog/serverless-aws +/test/integration/serverless_perf @DataDog/serverless @Datadog/serverless-aws /test/kitchen/ @DataDog/agent-devx-loops /test/kitchen/test-definitions/ @DataDog/agent-delivery /test/kitchen/test/integration/ @DataDog/agent-delivery @@ -594,6 +596,7 @@ /test/fakeintake/aggregator/servicediscoveryAggregator.go @DataDog/apm-onboarding @DataDog/universal-service-monitoring /test/fakeintake/aggregator/servicediscoveryAggregator_test.go @DataDog/apm-onboarding @DataDog/universal-service-monitoring /test/new-e2e/ @DataDog/agent-e2e-testing @DataDog/agent-devx-loops +/test/new-e2e/pkg/components/datadog-installer @DataDog/windows-agent /test/new-e2e/test-infra-definition @DataDog/agent-devx-loops /test/new-e2e/system-probe @DataDog/ebpf-platform /test/new-e2e/scenarios/system-probe @DataDog/ebpf-platform @@ 
-630,6 +633,7 @@ /tools/ebpf/ @DataDog/ebpf-platform /tools/gdb/ @DataDog/agent-shared-components /tools/go-update/ @DataDog/agent-shared-components +/tools/NamedPipeCmd/ @DataDog/windows-kernel-integrations /tools/retry_file_dump/ @DataDog/agent-metrics-logs /tools/windows/ @DataDog/windows-agent /tools/windows/DatadogAgentInstaller/WixSetup/localization-en-us.wxl @DataDog/windows-agent @DataDog/documentation diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 0ae6b23fa7e6f..cd1de234a6014 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,46 +1,25 @@ ### What does this PR do? - - ### Motivation - +### Describe how to test/QA your changes -### Additional Notes +### Possible Drawbacks / Trade-offs +### Additional Notes - -### Possible Drawbacks / Trade-offs - - - -### Describe how to test/QA your changes - - +--> \ No newline at end of file diff --git a/.github/workflows/add_milestone.yml b/.github/workflows/add_milestone.yml index ef43c0869e896..cc647378a5460 100644 --- a/.github/workflows/add_milestone.yml +++ b/.github/workflows/add_milestone.yml @@ -8,11 +8,15 @@ on: - main - "[0-9]+.[0-9]+.x" +permissions: {} + jobs: add-milestone-pr: name: Add Milestone on PR if: github.event.pull_request.merged == true runs-on: ubuntu-latest + permissions: + pull-requests: write env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_REPO: ${{ github.repository }} diff --git a/.github/workflows/backport-pr.yml b/.github/workflows/backport-pr.yml index 188cd54aadb8d..950d3ff41ca6a 100644 --- a/.github/workflows/backport-pr.yml +++ b/.github/workflows/backport-pr.yml @@ -5,6 +5,8 @@ on: - closed - labeled +permissions: {} + jobs: backport: name: Backport PR @@ -18,8 +20,11 @@ jobs: && contains(github.event.label.name, 'backport') ) ) + permissions: + contents: write + pull-requests: write steps: - - uses: actions/create-github-app-token@31c86eb3b33c9b601a1f60f98dcbfd1d70f379b4 # v1.10.3 + - uses: actions/create-github-app-token@5d869da34e18e7287c1daad50e0b8ea0f506ce69 # v1.11.0 id: app-token with: app-id: ${{ vars.DD_GITHUB_TOKEN_GENERATOR_APP_ID }} diff --git a/.github/workflows/buildimages-update.yml b/.github/workflows/buildimages-update.yml index 9a04aceed38e4..454fc14e335a1 100644 --- a/.github/workflows/buildimages-update.yml +++ b/.github/workflows/buildimages-update.yml @@ -24,6 +24,8 @@ on: required: true type: boolean +permissions: {} + jobs: open-go-update-pr: runs-on: ubuntu-latest @@ -52,7 +54,7 @@ jobs: ref: ${{ inputs.branch }} - name: Setup Python and pip - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: # use Python < 3.12 so that distutil is still available by default python-version: 3.11 diff --git a/.github/workflows/chase_release_managers.yml b/.github/workflows/chase_release_managers.yml index 652746f1e93a9..c1a4cff2baad0 100644 --- a/.github/workflows/chase_release_managers.yml +++ b/.github/workflows/chase_release_managers.yml @@ -8,9 +8,10 @@ on: required: true type: string +permissions: {} jobs: - create_release_schedule: + chase_release_managers: runs-on: ubuntu-latest steps: - name: Checkout repository @@ -18,7 +19,7 @@ jobs: with: ref: ${{ github.head_ref }} - name: Install python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: "pip" @@ -30,5 +31,6 @@ jobs: 
env: ATLASSIAN_USERNAME : ${{ secrets.ATLASSIAN_USERNAME }} ATLASSIAN_PASSWORD : ${{ secrets.ATLASSIAN_PASSWORD }} + SLACK_API_TOKEN : ${{ secrets.SLACK_DATADOG_AGENT_BOT_TOKEN }} run: | inv -e release.chase-release-managers --version ${{ github.event.inputs.version }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 4613f73359f16..1c600968af15f 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -10,9 +10,13 @@ on: - main - "[0-9]+.[0-9]+.x" +permissions: {} + jobs: CodeQL-Build: runs-on: ubuntu-20.04 + permissions: + security-events: write strategy: matrix: language: ["go", "javascript", "python", "cpp"] @@ -24,9 +28,9 @@ jobs: fetch-depth: 0 - name: Setup Python3 - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: - python-version: "3.11.8" + python-version: "3.12.6" cache: "pip" - run: pip3 install -r requirements.txt diff --git a/.github/workflows/create_rc_pr.yml b/.github/workflows/create_rc_pr.yml index 0d190cb7bb606..bd40afbf6e315 100644 --- a/.github/workflows/create_rc_pr.yml +++ b/.github/workflows/create_rc_pr.yml @@ -9,6 +9,8 @@ on: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} +permissions: {} + jobs: find_release_branches: runs-on: ubuntu-latest @@ -22,7 +24,7 @@ jobs: sparse-checkout: 'tasks' - name: Install python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: "pip" @@ -48,6 +50,9 @@ jobs: create_rc_pr: runs-on: ubuntu-latest needs: find_release_branches + permissions: + contents: write + pull-requests: write strategy: matrix: value: ${{fromJSON(needs.find_release_branches.outputs.branches)}} @@ -59,7 +64,7 @@ jobs: fetch-depth: 0 - name: Install python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: "pip" @@ -76,8 +81,8 @@ jobs: env: ATLASSIAN_USERNAME: ${{ secrets.ATLASSIAN_USERNAME }} ATLASSIAN_PASSWORD: ${{ secrets.ATLASSIAN_PASSWORD }} + SLACK_API_TOKEN : ${{ secrets.SLACK_DATADOG_AGENT_BOT_TOKEN }} run: | - export SLACK_API_TOKEN="${{ secrets.SLACK_API_TOKEN }}" echo "CHANGES=$(inv -e release.check-for-changes -r ${{ matrix.value }} ${{ needs.find_release_branches.outputs.warning }})" >> $GITHUB_OUTPUT - name: Create RC PR diff --git a/.github/workflows/create_release_schedule.yml b/.github/workflows/create_release_schedule.yml index e19372fa8f1d7..4f674987ecf05 100644 --- a/.github/workflows/create_release_schedule.yml +++ b/.github/workflows/create_release_schedule.yml @@ -12,6 +12,8 @@ on: required: true type: string +permissions: {} + jobs: create_release_schedule: @@ -22,7 +24,7 @@ jobs: with: ref: ${{ github.head_ref }} - name: Install python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: "pip" diff --git a/.github/workflows/cws-btfhub-sync.yml b/.github/workflows/cws-btfhub-sync.yml index 970a0fef308f8..e5fe407c6307b 100644 --- a/.github/workflows/cws-btfhub-sync.yml +++ b/.github/workflows/cws-btfhub-sync.yml @@ -16,6 +16,8 @@ on: schedule: - cron: '30 4 * * 5' # at 4:30 UTC on Friday +permissions: {} + jobs: generate: runs-on: 
ubuntu-latest @@ -62,7 +64,7 @@ jobs: sparse-checkout: ${{ matrix.cone }} - name: Install python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: 'pip' @@ -91,6 +93,9 @@ jobs: combine: needs: generate runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write steps: - name: Checkout datadog-agent repository uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 @@ -98,7 +103,7 @@ jobs: ref: ${{ inputs.base_branch || 'main' }} - name: Install python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: 'pip' diff --git a/.github/workflows/datadog-static-analysis.yml b/.github/workflows/datadog-static-analysis.yml index c738e875c2991..e7030c87f71a9 100644 --- a/.github/workflows/datadog-static-analysis.yml +++ b/.github/workflows/datadog-static-analysis.yml @@ -2,6 +2,8 @@ on: [push] name: Datadog Static Analysis +permissions: {} + jobs: static-analysis: if: github.triggering_actor != 'dependabot[bot]' diff --git a/.github/workflows/do-not-merge.yml b/.github/workflows/do-not-merge.yml index 13886c696f679..a21f9e03d1fb2 100644 --- a/.github/workflows/do-not-merge.yml +++ b/.github/workflows/do-not-merge.yml @@ -10,6 +10,8 @@ on: branches: - mq-working-branch-* +permissions: {} + jobs: do-not-merge: if: ${{ contains(github.event.*.labels.*.name, 'do-not-merge/hold') || contains(github.event.*.labels.*.name, 'do-not-merge/WIP') }} diff --git a/.github/workflows/docs-dev.yml b/.github/workflows/docs-dev.yml index 7dba335ed58b7..04c29577b2bbf 100644 --- a/.github/workflows/docs-dev.yml +++ b/.github/workflows/docs-dev.yml @@ -14,6 +14,8 @@ on: - docs/** - .github/workflows/docs-dev.yml +permissions: {} + concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: ${{ github.event_name == 'pull_request' && true || false }} @@ -29,7 +31,7 @@ jobs: fetch-depth: 0 - name: Set up Python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: '3.12' @@ -54,6 +56,8 @@ jobs: publish: runs-on: ubuntu-latest + permissions: + contents: write if: github.event_name == 'push' && github.ref == 'refs/heads/main' needs: diff --git a/.github/workflows/external-contributor.yml b/.github/workflows/external-contributor.yml index 3cc35d4cc12e2..d03850b0b15f6 100644 --- a/.github/workflows/external-contributor.yml +++ b/.github/workflows/external-contributor.yml @@ -6,10 +6,14 @@ on: pull_request_target: types: [opened, reopened] +permissions: {} + jobs: external-contributor-prs: name: Handle Fork PRs runs-on: ubuntu-latest + permissions: + pull-requests: write if: github.event.pull_request.head.repo.full_name != github.repository steps: - name: Checkout repository @@ -18,7 +22,7 @@ jobs: ref: main fetch-depth: 0 - name: Setup python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: 'pip' diff --git a/.github/workflows/go-update-commenter.yml b/.github/workflows/go-update-commenter.yml index 9925fba1614ca..ff1d104c691ec 100644 --- a/.github/workflows/go-update-commenter.yml +++ 
b/.github/workflows/go-update-commenter.yml @@ -5,6 +5,8 @@ on: # Only run on PR label events (in particular not on every commit) types: [ labeled ] +permissions: {} + jobs: old-versions-match: # Only run if the PR is labeled with 'go-update' diff --git a/.github/workflows/go_mod_tidy.yml b/.github/workflows/go_mod_tidy.yml index d90caf056fb82..a01b9d29ad803 100644 --- a/.github/workflows/go_mod_tidy.yml +++ b/.github/workflows/go_mod_tidy.yml @@ -30,7 +30,7 @@ jobs: with: go-version-file: ".go-version" - name: Install python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: "pip" diff --git a/.github/workflows/gohai.yml b/.github/workflows/gohai.yml index bb20f0e0104df..a328f67c5b853 100644 --- a/.github/workflows/gohai.yml +++ b/.github/workflows/gohai.yml @@ -12,6 +12,8 @@ on: paths: - "pkg/gohai/**" +permissions: {} + jobs: gohai_test: strategy: diff --git a/.github/workflows/label-analysis.yml b/.github/workflows/label-analysis.yml index 08980653d1d83..29f35d223e758 100644 --- a/.github/workflows/label-analysis.yml +++ b/.github/workflows/label-analysis.yml @@ -13,15 +13,19 @@ env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_REPO: ${{ github.repository }} +permissions: {} + jobs: assign-team-label: if: github.triggering_actor != 'dd-devflow[bot]' runs-on: ubuntu-latest + permissions: + pull-requests: write steps: - name: Checkout repository uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - name: Setup python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: 'pip' @@ -39,7 +43,7 @@ jobs: with: fetch-depth: 0 - name: Setup python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: 'pip' diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 5cade58e6495c..b92075b895975 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -6,6 +6,8 @@ on: - main - "[0-9]+.[0-9]+.x" +permissions: {} + jobs: label: permissions: diff --git a/.github/workflows/markdown-lint-check.yml b/.github/workflows/markdown-lint-check.yml index 94386e05e6621..6ade9a5ec8560 100644 --- a/.github/workflows/markdown-lint-check.yml +++ b/.github/workflows/markdown-lint-check.yml @@ -3,6 +3,8 @@ name: Check Markdown links on: pull_request: +permissions: {} + jobs: markdown-link-check: runs-on: ubuntu-latest diff --git a/.github/workflows/serverless-benchmarks.yml b/.github/workflows/serverless-benchmarks.yml index 37742948b0d36..40ac0953e42a4 100644 --- a/.github/workflows/serverless-benchmarks.yml +++ b/.github/workflows/serverless-benchmarks.yml @@ -14,6 +14,8 @@ concurrency: group: ${{ github.workflow }}/PR#${{ github.event.pull_request.number }} cancel-in-progress: true +permissions: {} + jobs: baseline: name: Baseline diff --git a/.github/workflows/serverless-binary-size.yml b/.github/workflows/serverless-binary-size.yml index e396e8a78751d..9240bb666b3f9 100644 --- a/.github/workflows/serverless-binary-size.yml +++ b/.github/workflows/serverless-binary-size.yml @@ -6,9 +6,13 @@ on: env: SIZE_ALLOWANCE: fromJSON(1000000) # 1 MB +permissions: {} + jobs: comment: runs-on: ubuntu-latest + permissions: + pull-requests: write # Add 
comment to PR steps: - name: Checkout datadog-agent repository uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 diff --git a/.github/workflows/serverless-integration.yml b/.github/workflows/serverless-integration.yml index 8bd8459b6c52f..c2866e77f69b4 100644 --- a/.github/workflows/serverless-integration.yml +++ b/.github/workflows/serverless-integration.yml @@ -12,6 +12,8 @@ on: schedule: - cron: '0 14 * * *' # cron schedule uses UTC timezone. Run tests at the beginning of the day in US-East +permissions: {} + jobs: test: runs-on: ubuntu-latest diff --git a/.github/workflows/slapr.yml b/.github/workflows/slapr.yml index 48be5e393fd38..e88d67945b5a2 100644 --- a/.github/workflows/slapr.yml +++ b/.github/workflows/slapr.yml @@ -7,6 +7,8 @@ # pull_request: # types: [closed] # +# permissions: {} +# # jobs: # run_slapr_datadog_agent: # runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index ab4936e5d46b1..83c6d28794cff 100644 --- a/.gitignore +++ b/.gitignore @@ -112,16 +112,6 @@ pkg/process/config/logs *.ninja compile_commands.json pkg/ebpf/bytecode/build/**/*.d -pkg/ebpf/bytecode/runtime/conntrack.go -pkg/ebpf/bytecode/runtime/http.go -pkg/ebpf/bytecode/runtime/usm.go -pkg/ebpf/bytecode/runtime/shared-libraries.go -pkg/ebpf/bytecode/runtime/offsetguess-test.go -pkg/ebpf/bytecode/runtime/oom-kill.go -pkg/ebpf/bytecode/runtime/runtime-security.go -pkg/ebpf/bytecode/runtime/tcp-queue-length.go -pkg/ebpf/bytecode/runtime/tracer.go -pkg/ebpf/bytecode/runtime/logdebug-test.go pkg/security/tests/syscall_tester/**/*.d # dsd artifacts diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c50880e298007..5a4769bb42264 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,13 +1,15 @@ --- include: - .gitlab/.pre/cancel-prev-pipelines.yml - - .gitlab/.pre/test_gitlab_configuration.yml + - .gitlab/.pre/gitlab_configuration.yml - .gitlab/benchmarks/include.yml - .gitlab/binary_build/include.yml - .gitlab/check_deploy/check_deploy.yml - .gitlab/check_merge/do_not_merge.yml - .gitlab/choco_build/choco_build.yml - .gitlab/choco_deploy/choco_deploy.yml + - .gitlab/powershell_script_signing/powershell_script_signing.yml + - .gitlab/powershell_script_deploy/powershell_script_deploy.yml - .gitlab/common/shared.yml - .gitlab/common/skip_ci_check.yml - .gitlab/common/pr_commenter.yml @@ -22,7 +24,6 @@ include: - .gitlab/deps_fetch/deps_fetch.yml - .gitlab/dev_container_deploy/include.yml - .gitlab/e2e/e2e.yml - - .gitlab/e2e_k8s/e2e_k8s.yml - .gitlab/e2e_install_packages/include.yml - .gitlab/e2e_pre_test/e2e_pre_test.yml - .gitlab/functional_test/include.yml @@ -35,6 +36,7 @@ include: - .gitlab/kitchen_cleanup/include.yml - .gitlab/kitchen_deploy/kitchen_deploy.yml - .gitlab/kitchen_testing/include.yml + - .gitlab/lint/include.yml - .gitlab/maintenance_jobs/include.yml - .gitlab/notify/notify.yml - .gitlab/package_build/include.yml @@ -49,6 +51,7 @@ include: default: retry: max: 2 + exit_codes: 42 when: - runner_system_failure - stuck_or_timeout_failure @@ -64,6 +67,7 @@ stages: - maintenance_jobs - deps_build - deps_fetch + - lint - source_test - source_test_stats - software_composition_analysis @@ -89,8 +93,8 @@ stages: - deploy_cws_instrumentation - deploy_dca - trigger_release - - choco_build - - choco_deploy + - choco_and_install_script_build + - choco_and_install_script_deploy - internal_image_deploy - install_script_testing - e2e_pre_test @@ -140,6 +144,7 @@ variables: WINDOWS_TESTING_S3_BUCKET_A6: pipelines/A6/$CI_PIPELINE_ID WINDOWS_TESTING_S3_BUCKET_A7: 
pipelines/A7/$CI_PIPELINE_ID WINDOWS_BUILDS_S3_BUCKET: $WIN_S3_BUCKET/builds + WINDOWS_POWERSHELL_DIR: $CI_PROJECT_DIR/signed_scripts DEB_RPM_TESTING_BUCKET_BRANCH: testing # branch of the DEB_TESTING_S3_BUCKET and RPM_TESTING_S3_BUCKET repos to release to, 'testing' S3_CP_OPTIONS: --no-progress --region us-east-1 --sse AES256 S3_CP_CMD: aws s3 cp $S3_CP_OPTIONS @@ -168,15 +173,15 @@ variables: # To use images from datadog-agent-buildimages dev branches, set the corresponding # SUFFIX variable to _test_only DATADOG_AGENT_BUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_BUILDIMAGES: v41089396-d65d75ec + DATADOG_AGENT_BUILDIMAGES: v45605580-5f42ffba DATADOG_AGENT_WINBUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_WINBUILDIMAGES: v41089396-d65d75ec + DATADOG_AGENT_WINBUILDIMAGES: v45605580-5f42ffba DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_ARMBUILDIMAGES: v41089396-d65d75ec + DATADOG_AGENT_ARMBUILDIMAGES: v45605580-5f42ffba DATADOG_AGENT_SYSPROBE_BUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_SYSPROBE_BUILDIMAGES: v41089396-d65d75ec + DATADOG_AGENT_SYSPROBE_BUILDIMAGES: v45605580-5f42ffba DATADOG_AGENT_BTF_GEN_BUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_BTF_GEN_BUILDIMAGES: v41089396-d65d75ec + DATADOG_AGENT_BTF_GEN_BUILDIMAGES: v45605580-5f42ffba DATADOG_AGENT_EMBEDDED_PATH: /opt/datadog-agent/embedded DEB_GPG_KEY_ID: c0962c7d @@ -194,61 +199,71 @@ variables: # skip known flaky tests by default GO_TEST_SKIP_FLAKE: "true" - # List of parameters retrieved from AWS SSM + # Start aws ssm variables # They must be defined as environment variables in the GitLab CI/CD settings, to ease rotation if needed - AGENT_QA_PROFILE_SSM_NAME: ci.datadog-agent.agent-qa-profile # agent-devx-infra - API_KEY_ORG2_SSM_NAME: ci.datadog-agent.datadog_api_key_org2 # agent-devx-infra - API_KEY_DDDEV_SSM_NAME: ci.datadog-agent.datadog_api_key # agent-devx-infra - APP_KEY_ORG2_SSM_NAME: ci.datadog-agent.datadog_app_key_org2 # agent-devx-infra - CHANGELOG_COMMIT_SHA_SSM_NAME: ci.datadog-agent.gitlab_changelog_commit_sha # agent-devx-infra - CHOCOLATEY_API_KEY_SSM_NAME: ci.datadog-agent.chocolatey_api_key # windows-agent - CODECOV_TOKEN_SSM_NAME: ci.datadog-agent.codecov_token # agent-devx-infra - DEB_GPG_KEY_SSM_NAME: ci.datadog-agent.deb_signing_private_key_${DEB_GPG_KEY_ID} # agent-delivery - DEB_SIGNING_PASSPHRASE_SSM_NAME: ci.datadog-agent.deb_signing_key_passphrase_${DEB_GPG_KEY_ID} # agent-delivery - DOCKER_REGISTRY_LOGIN_SSM_KEY: ci.datadog-agent.docker_hub_login # container-integrations - DOCKER_REGISTRY_PWD_SSM_KEY: ci.datadog-agent.docker_hub_pwd # container-integrations - E2E_TESTS_API_KEY_SSM_NAME: ci.datadog-agent.e2e_tests_api_key # agent-devx-loops - E2E_TESTS_APP_KEY_SSM_NAME: ci.datadog-agent.e2e_tests_app_key # agent-devx-loops - E2E_TESTS_RC_KEY_SSM_NAME: ci.datadog-agent.e2e_tests_rc_key # agent-devx-loops + AGENT_QA_PROFILE: ci.datadog-agent.agent-qa-profile # agent-devx-infra + API_KEY_ORG2: ci.datadog-agent.datadog_api_key_org2 # agent-devx-infra + API_KEY_DDDEV: ci.datadog-agent.datadog_api_key # agent-devx-infra + APP_KEY_ORG2: ci.datadog-agent.datadog_app_key_org2 # agent-devx-infra + CHANGELOG_COMMIT_SHA: ci.datadog-agent.gitlab_changelog_commit_sha # agent-devx-infra + CHOCOLATEY_API_KEY: ci.datadog-agent.chocolatey_api_key # windows-agent + CODECOV_TOKEN: ci.datadog-agent.codecov_token # agent-devx-infra + DEB_GPG_KEY: ci.datadog-agent.deb_signing_private_key_${DEB_GPG_KEY_ID} # agent-delivery + DEB_SIGNING_PASSPHRASE: ci.datadog-agent.deb_signing_key_passphrase_${DEB_GPG_KEY_ID} # agent-delivery + 
DOCKER_REGISTRY_LOGIN: ci.datadog-agent.docker_hub_login # container-integrations + DOCKER_REGISTRY_PWD: ci.datadog-agent.docker_hub_pwd # container-integrations + E2E_TESTS_API_KEY: ci.datadog-agent.e2e_tests_api_key # agent-devx-loops + E2E_TESTS_APP_KEY: ci.datadog-agent.e2e_tests_app_key # agent-devx-loops + E2E_TESTS_RC_KEY: ci.datadog-agent.e2e_tests_rc_key # agent-devx-loops E2E_TESTS_AZURE_CLIENT_ID: ci.datadog-agent.e2e_tests_azure_client_id # agent-devx-loops E2E_TESTS_AZURE_CLIENT_SECRET: ci.datadog-agent.e2e_tests_azure_client_secret # agent-devx-loops E2E_TESTS_AZURE_TENANT_ID: ci.datadog-agent.e2e_tests_azure_tenant_id # agent-devx-loops E2E_TESTS_AZURE_SUBSCRIPTION_ID: ci.datadog-agent.e2e_tests_azure_subscription_id # agent-devx-loops - KITCHEN_EC2_SSH_KEY_SSM_NAME: ci.datadog-agent.aws_ec2_kitchen_ssh_key # agent-devx-loops - KITCHEN_AZURE_CLIENT_ID_SSM_NAME: ci.datadog-agent.azure_kitchen_client_id # agent-devx-loops - KITCHEN_AZURE_CLIENT_SECRET_SSM_NAME: ci.datadog-agent.azure_kitchen_client_secret # agent-devx-loops - KITCHEN_AZURE_SUBSCRIPTION_ID_SSM_NAME: ci.datadog-agent.azure_kitchen_subscription_id # agent-devx-loops - KITCHEN_AZURE_TENANT_ID_SSM_NAME: ci.datadog-agent.azure_kitchen_tenant_id # agent-devx-loops - GITHUB_PR_COMMENTER_APP_KEY_SSM_NAME: pr-commenter.github_app_key # agent-devx-infra - GITHUB_PR_COMMENTER_INTEGRATION_ID_SSM_NAME: pr-commenter.github_integration_id # agent-devx-infra - GITHUB_PR_COMMENTER_INSTALLATION_ID_SSM_NAME: pr-commenter.github_installation_id # agent-devx-infra - GITLAB_SCHEDULER_TOKEN_SSM_NAME: ci.datadog-agent.gitlab_pipelines_scheduler_token # ci-cd - GITLAB_READ_API_TOKEN_SSM_NAME: ci.datadog-agent.gitlab_read_api_token # ci-cd - GITLAB_FULL_API_TOKEN_SSM_NAME: ci.datadog-agent.gitlab_full_api_token # ci-cd - INSTALL_SCRIPT_API_KEY_SSM_NAME: ci.agent-linux-install-script.datadog_api_key_2 # agent-delivery - JIRA_READ_API_TOKEN_SSM_NAME: ci.datadog-agent.jira_read_api_token # agent-devx-infra - AGENT_GITHUB_APP_ID_SSM_NAME: ci.datadog-agent.platform-github-app-id # agent-devx-infra - AGENT_GITHUB_INSTALLATION_ID_SSM_NAME: ci.datadog-agent.platform-github-app-installation-id # agent-devx-infra - AGENT_GITHUB_KEY_SSM_NAME: ci.datadog-agent.platform-github-app-key # agent-devx-infra - MACOS_GITHUB_APP_ID_SSM_NAME: ci.datadog-agent.macos_github_app_id # agent-devx-infra - MACOS_GITHUB_INSTALLATION_ID_SSM_NAME: ci.datadog-agent.macos_github_installation_id # agent-devx-infra - MACOS_GITHUB_KEY_SSM_NAME: ci.datadog-agent.macos_github_key_b64 # agent-devx-infra - MACOS_GITHUB_APP_ID_2_SSM_NAME: ci.datadog-agent.macos_github_app_id_2 # agent-devx-infra - MACOS_GITHUB_INSTALLATION_ID_2_SSM_NAME: ci.datadog-agent.macos_github_installation_id_2 # agent-devx-infra - MACOS_GITHUB_KEY_2_SSM_NAME: ci.datadog-agent.macos_github_key_b64_2 # agent-devx-infra - RPM_GPG_KEY_SSM_NAME: ci.datadog-agent.rpm_signing_private_key_${RPM_GPG_KEY_ID} # agent-delivery - RPM_SIGNING_PASSPHRASE_SSM_NAME: ci.datadog-agent.rpm_signing_key_passphrase_${RPM_GPG_KEY_ID} # agent-delivery - SLACK_AGENT_CI_TOKEN_SSM_NAME: ci.datadog-agent.slack_agent_ci_token # agent-devx-infra - SMP_ACCOUNT_ID_SSM_NAME: ci.datadog-agent.single-machine-performance-account-id # single-machine-performance - SMP_AGENT_TEAM_ID_SSM_NAME: ci.datadog-agent.single-machine-performance-agent-team-id # single-machine-performance - SMP_API_SSM_NAME: ci.datadog-agent.single-machine-performance-api # single-machine-performance - SMP_BOT_ACCESS_KEY_SSM_NAME: 
ci.datadog-agent.single-machine-performance-bot-access-key # single-machine-performance - SMP_BOT_ACCESS_KEY_ID_SSM_NAME: ci.datadog-agent.single-machine-performance-bot-access-key-id # single-machine-performance - SSH_KEY_SSM_NAME: ci.datadog-agent.ssh_key # system-probe - SSH_KEY_RSA_SSM_NAME: ci.datadog-agent.ssh_key_rsa # agent-devx-loops - SSH_PUBLIC_KEY_RSA_SSM_NAME: ci.datadog-agent.ssh_public_key_rsa # agent-devx-loops - VCPKG_BLOB_SAS_URL_SSM_NAME: ci.datadog-agent-buildimages.vcpkg_blob_sas_url # windows-agent - WINGET_PAT_SSM_NAME: ci.datadog-agent.winget_pat # windows-agent + E2E_TESTS_GCP_CREDENTIALS: ci.datadog-agent.e2e_tests_gcp_credentials # agent-devx-loops + KITCHEN_EC2_SSH_KEY: ci.datadog-agent.aws_ec2_kitchen_ssh_key # agent-devx-loops + KITCHEN_AZURE_CLIENT_ID: ci.datadog-agent.azure_kitchen_client_id # agent-devx-loops + KITCHEN_AZURE_CLIENT_SECRET: ci.datadog-agent.azure_kitchen_client_secret # agent-devx-loops + KITCHEN_AZURE_SUBSCRIPTION_ID: ci.datadog-agent.azure_kitchen_subscription_id # agent-devx-loops + KITCHEN_AZURE_TENANT_ID: ci.datadog-agent.azure_kitchen_tenant_id # agent-devx-loops + GITHUB_PR_COMMENTER_APP_KEY: pr-commenter.github_app_key # agent-devx-infra + GITHUB_PR_COMMENTER_INTEGRATION_ID: pr-commenter.github_integration_id # agent-devx-infra + GITHUB_PR_COMMENTER_INSTALLATION_ID: pr-commenter.github_installation_id # agent-devx-infra + GITLAB_SCHEDULER_TOKEN: ci.datadog-agent.gitlab_pipelines_scheduler_token # ci-cd + GITLAB_READ_API_TOKEN: ci.datadog-agent.gitlab_read_api_token # ci-cd + GITLAB_FULL_API_TOKEN: ci.datadog-agent.gitlab_full_api_token # ci-cd + INSTALL_SCRIPT_API_KEY: ci.agent-linux-install-script.datadog_api_key_2 # agent-delivery + JIRA_READ_API_TOKEN: ci.datadog-agent.jira_read_api_token # agent-devx-infra + AGENT_GITHUB_APP_ID: ci.datadog-agent.platform-github-app-id # agent-devx-infra + AGENT_GITHUB_INSTALLATION_ID: ci.datadog-agent.platform-github-app-installation-id # agent-devx-infra + AGENT_GITHUB_KEY: ci.datadog-agent.platform-github-app-key # agent-devx-infra + MACOS_GITHUB_APP_ID: ci.datadog-agent.macos_github_app_id # agent-devx-infra + MACOS_GITHUB_INSTALLATION_ID: ci.datadog-agent.macos_github_installation_id # agent-devx-infra + MACOS_GITHUB_KEY: ci.datadog-agent.macos_github_key_b64 # agent-devx-infra + MACOS_GITHUB_APP_ID_2: ci.datadog-agent.macos_github_app_id_2 # agent-devx-infra + MACOS_GITHUB_INSTALLATION_ID_2: ci.datadog-agent.macos_github_installation_id_2 # agent-devx-infra + MACOS_GITHUB_KEY_2: ci.datadog-agent.macos_github_key_b64_2 # agent-devx-infra + RPM_GPG_KEY: ci.datadog-agent.rpm_signing_private_key_${RPM_GPG_KEY_ID} # agent-delivery + RPM_SIGNING_PASSPHRASE: ci.datadog-agent.rpm_signing_key_passphrase_${RPM_GPG_KEY_ID} # agent-delivery + SLACK_AGENT_CI_TOKEN: ci.datadog-agent.slack_agent_ci_token # agent-devx-infra + SMP_ACCOUNT_ID: ci.datadog-agent.single-machine-performance-account-id # single-machine-performance + SMP_AGENT_TEAM_ID: ci.datadog-agent.single-machine-performance-agent-team-id # single-machine-performance + SMP_API: ci.datadog-agent.single-machine-performance-api # single-machine-performance + SMP_BOT_ACCESS_KEY: ci.datadog-agent.single-machine-performance-bot-access-key # single-machine-performance + SMP_BOT_ACCESS_KEY_ID: ci.datadog-agent.single-machine-performance-bot-access-key-id # single-machine-performance + SSH_KEY: ci.datadog-agent.ssh_key # system-probe + SSH_KEY_RSA: ci.datadog-agent.ssh_key_rsa # agent-devx-loops + SSH_PUBLIC_KEY_RSA: ci.datadog-agent.ssh_public_key_rsa 
# agent-devx-loops + VCPKG_BLOB_SAS_URL: ci.datadog-agent-buildimages.vcpkg_blob_sas_url # windows-agent + WINGET_PAT: ci.datadog-agent.winget_pat # windows-agent + # End aws ssm variables + # Start vault variables + # End vault variables + + ATLASSIAN_WRITE: atlassian-write + AGENT_GITHUB_APP: agent-github-app + MACOS_GITHUB_APP_1: macos-github-app-one + MACOS_GITHUB_APP_2: macos-github-app-two + DOCKER_REGISTRY_RO: dockerhub-readonly DD_PKG_VERSION: "latest" @@ -259,6 +274,7 @@ variables: RESTORE_CACHE_ATTEMPTS: 2 # Feature flags FF_SCRIPT_SECTIONS: 1 # Prevent multiline scripts log collapsing, see https://gitlab.com/gitlab-org/gitlab-runner/-/issues/3392 + FF_KUBERNETES_HONOR_ENTRYPOINT: true # Honor the entrypoint in the Docker image when running Kubernetes jobs # # Condition mixins for simplification of rules @@ -698,6 +714,7 @@ workflow: - .gitlab/functional_test/security_agent.yml - .gitlab/kernel_matrix_testing/security_agent.yml - .gitlab/kernel_matrix_testing/common.yml + - .gitlab/source_test/ebpf.yml - test/new-e2e/system-probe/**/* - test/new-e2e/scenarios/system-probe/**/* - test/new-e2e/pkg/runner/**/* @@ -741,6 +758,7 @@ workflow: - pkg/util/kernel/**/* - .gitlab/kernel_matrix_testing/system_probe.yml - .gitlab/kernel_matrix_testing/common.yml + - .gitlab/source_test/ebpf.yml - test/new-e2e/system-probe/**/* - test/new-e2e/scenarios/system-probe/**/* - test/new-e2e/pkg/runner/**/* @@ -789,6 +807,7 @@ workflow: paths: - test/new-e2e/pkg/**/* - test/new-e2e/go.mod + - flakes.yaml compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 .on_e2e_or_windows_installer_changes: @@ -1027,6 +1046,15 @@ workflow: - when: manual allow_failure: true +.on_cspm_or_e2e_changes: + - !reference [.on_e2e_main_release_or_rc] + - changes: + paths: + - pkg/security/**/* + - test/new-e2e/tests/cspm/**/* #TODO: Add other paths that should trigger the execution of CSPM e2e tests + compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 + when: on_success + .on_windows_systemprobe_or_e2e_changes: - !reference [.on_e2e_main_release_or_rc] - changes: diff --git a/.gitlab/.pre/cancel-prev-pipelines.yml b/.gitlab/.pre/cancel-prev-pipelines.yml index afbdde2dbd51b..48b5170248759 100644 --- a/.gitlab/.pre/cancel-prev-pipelines.yml +++ b/.gitlab/.pre/cancel-prev-pipelines.yml @@ -14,6 +14,5 @@ cancel-prev-pipelines: when: never - when: on_success script: - - source /root/.bashrc - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - inv pipeline.auto-cancel-previous-pipelines diff --git a/.gitlab/.pre/create_release_qa_cards.yml b/.gitlab/.pre/create_release_qa_cards.yml new file mode 100644 index 0000000000000..d22c256bb3609 --- /dev/null +++ b/.gitlab/.pre/create_release_qa_cards.yml @@ -0,0 +1,14 @@ +--- +create_release_qa_cards: + stage: .pre + image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + tags: ["arch:amd64"] + rules: + - !reference [.on_deploy_rc] + script: + - !reference [.setup_agent_github_app] + - ATLASSIAN_PASSWORD=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $ATLASSIAN_WRITE token) || exit $?; export ATLASSIAN_PASSWORD + - ATLASSIAN_USERNAME=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $ATLASSIAN_WRITE 
user) || exit $?; export ATLASSIAN_USERNAME + - pip install ddqa + - inv release.create-qa-cards -t ${CI_COMMIT_REF_NAME} + allow_failure: true diff --git a/.gitlab/.pre/gitlab_configuration.yml b/.gitlab/.pre/gitlab_configuration.yml new file mode 100644 index 0000000000000..2114c9aaffb95 --- /dev/null +++ b/.gitlab/.pre/gitlab_configuration.yml @@ -0,0 +1,49 @@ +test_gitlab_configuration: + stage: .pre + image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + tags: ["arch:amd64"] + rules: + - !reference [.on_gitlab_changes] + script: + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) || exit $?; export GITLAB_TOKEN + - inv -e linter.gitlab-ci + - inv -e linter.job-change-path + - inv -e linter.gitlab-change-paths + - inv -e linter.ssm-parameters + +test_gitlab_compare_to: + stage: .pre + image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + tags: ["arch:amd64"] + rules: + - !reference [.on_gitlab_changes] + script: + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) || exit $?; export GITLAB_TOKEN + - !reference [.setup_agent_github_app] + - pip install -r tasks/requirements.txt + - inv pipeline.compare-to-itself + +# Computes and uploads the GitLab CI configuration diff as an artifact +compute_gitlab_ci_config: + image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + stage: .pre + needs: [] + tags: ["arch:arm64"] + rules: + - if: $CI_PIPELINE_SOURCE != "push" + when: never + - when: on_success + before_script: + # Get main history + - git fetch origin main + - git checkout main + - git checkout - + script: + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) || exit $?; export GITLAB_TOKEN + - mkdir -p artifacts + - inv -e gitlab.compute-gitlab-ci-config --before-file artifacts/before.gitlab-ci.yml --after-file artifacts/after.gitlab-ci.yml --diff-file artifacts/diff.gitlab-ci.yml + artifacts: + when: always + paths: + - artifacts/ + expire_in: 1 day diff --git a/.gitlab/.pre/test_gitlab_configuration.yml b/.gitlab/.pre/test_gitlab_configuration.yml deleted file mode 100644 index 529cc1c8d2956..0000000000000 --- a/.gitlab/.pre/test_gitlab_configuration.yml +++ /dev/null @@ -1,26 +0,0 @@ -test_gitlab_configuration: - stage: .pre - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES - tags: ["arch:amd64"] - rules: - - !reference [.on_gitlab_changes] - script: - - source /root/.bashrc - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN_SSM_NAME) - - inv -e linter.gitlab-ci - - inv -e linter.job-change-path - - inv -e linter.gitlab-change-paths - - inv -e linter.ssm-parameters - -test_gitlab_compare_to: - stage: .pre - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES - tags: ["arch:amd64"] - rules: - - !reference [.on_gitlab_changes] - script: - - source /root/.bashrc - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN_SSM_NAME) - - !reference [.setup_agent_github_app] - - pip install -r tasks/requirements.txt - - inv 
pipeline.compare-to-itself diff --git a/.gitlab/JOBOWNERS b/.gitlab/JOBOWNERS index 913e732bc6620..8e1e4cc106a0e 100644 --- a/.gitlab/JOBOWNERS +++ b/.gitlab/JOBOWNERS @@ -42,6 +42,7 @@ iot-agent-* @DataDog/agent-delivery agent_suse* @DataDog/agent-delivery windows-installer* @DataDog/windows-agent windows_* @DataDog/windows-agent +powershell_script_signing @DataDog/windows-agent # Packaging agent_deb* @DataDog/agent-delivery @@ -105,6 +106,8 @@ deploy_installer* @DataDog/agent-delivery deploy_packages* @DataDog/agent-delivery deploy_staging* @DataDog/agent-delivery publish_winget* @DataDog/windows-agent +powershell_script_deploy @DataDog/windows-agent +windows_bootstrapper_deploy @DataDog/windows-agent qa_*_oci @DataDog/agent-delivery # Deploy containers @@ -142,15 +145,18 @@ new-e2e-process* @DataDog/processes new-e2e-agent-platform* @DataDog/agent-delivery new-e2e-aml* @DataDog/agent-metrics-logs new-e2e-apm* @DataDog/agent-apm +new-e2e-discovery* @Datadog/universal-service-monitoring new-e2e-ndm* @DataDog/network-device-monitoring new-e2e-npm* @DataDog/Networks new-e2e-cws* @DataDog/agent-security -new-e2e-windows-agent* @DataDog/windows-agent new-e2e-orchestrator* @DataDog/container-app e2e_pre_test* @DataDog/agent-devx-loops new-e2e-remote-config* @DataDog/remote-config new-e2e-installer* @DataDog/fleet -new-e2e-windows-service-test @DataDog/windows-agent +new-e2e-installer-windows @DataDog/windows-agent +new-e2e-windows* @DataDog/windows-agent +new-e2e-windows-systemprobe @DataDog/windows-kernel-integrations +new-e2e-windows-security-agent @DataDog/windows-kernel-integrations new-e2e_windows_powershell_module_test @DataDog/windows-kernel-integrations # Kernel matrix testing diff --git a/.gitlab/binary_build/cluster_agent.yml b/.gitlab/binary_build/cluster_agent.yml index ea2eac369c8ac..b20ff9d10331e 100644 --- a/.gitlab/binary_build/cluster_agent.yml +++ b/.gitlab/binary_build/cluster_agent.yml @@ -23,7 +23,6 @@ cluster_agent-build_amd64: variables: ARCH: amd64 before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] cluster_agent-build_arm64: @@ -36,5 +35,4 @@ cluster_agent-build_arm64: variables: ARCH: arm64 before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] diff --git a/.gitlab/binary_build/cluster_agent_cloudfoundry.yml b/.gitlab/binary_build/cluster_agent_cloudfoundry.yml index 501c0d8dd3d38..0a3b9802eb47b 100644 --- a/.gitlab/binary_build/cluster_agent_cloudfoundry.yml +++ b/.gitlab/binary_build/cluster_agent_cloudfoundry.yml @@ -15,12 +15,11 @@ cluster_agent_cloudfoundry-build_amd64: ARCH: amd64 KUBERNETES_CPU_REQUEST: 4 before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv check-go-version - inv -e cluster-agent-cloudfoundry.build - cd $CI_PROJECT_DIR/$CLUSTER_AGENT_CLOUDFOUNDRY_BINARIES_DIR - mkdir -p $OMNIBUS_PACKAGE_DIR - - export PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7) + - PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7) || exit $? 
- tar cf $OMNIBUS_PACKAGE_DIR/datadog-cluster-agent-cloudfoundry-$PACKAGE_VERSION-$ARCH.tar.xz datadog-cluster-agent-cloudfoundry diff --git a/.gitlab/binary_build/cws_instrumentation.yml b/.gitlab/binary_build/cws_instrumentation.yml index b6d517df2f52c..787be00f814cb 100644 --- a/.gitlab/binary_build/cws_instrumentation.yml +++ b/.gitlab/binary_build/cws_instrumentation.yml @@ -17,7 +17,6 @@ cws_instrumentation-build_amd64: variables: ARCH: amd64 before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] cws_instrumentation-build_arm64: @@ -30,5 +29,4 @@ cws_instrumentation-build_arm64: variables: ARCH: arm64 before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] diff --git a/.gitlab/binary_build/linux.yml b/.gitlab/binary_build/linux.yml index d8644d63a2c9c..56ae035b566e4 100644 --- a/.gitlab/binary_build/linux.yml +++ b/.gitlab/binary_build/linux.yml @@ -1,16 +1,12 @@ --- build_dogstatsd_static-binary_x64: stage: binary_build - rules: - - !reference [.except_mergequeue] - - when: on_success image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] needs: ["lint_linux-x64", "go_deps"] variables: ARCH: amd64 before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv check-go-version @@ -28,7 +24,6 @@ build_dogstatsd_static-binary_arm64: variables: ARCH: arm64 before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv check-go-version @@ -44,7 +39,6 @@ build_dogstatsd-binary_x64: tags: ["arch:amd64"] needs: ["lint_linux-x64", "go_deps"] before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv check-go-version @@ -62,7 +56,6 @@ build_dogstatsd-binary_arm64: variables: ARCH: arm64 before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv check-go-version @@ -83,7 +76,6 @@ build_iot_agent-binary_x64: before_script: - !reference [.retrieve_linux_go_deps] script: - - source /root/.bashrc - inv check-go-version - inv -e agent.build --flavor iot --major-version 7 - $S3_CP_CMD $CI_PROJECT_DIR/$AGENT_BINARIES_DIR/agent $S3_ARTIFACTS_URI/iot/agent @@ -100,7 +92,6 @@ build_iot_agent-binary_arm64: before_script: - !reference [.retrieve_linux_go_deps] script: - - source /root/.bashrc - inv check-go-version - inv -e agent.build --flavor iot --major-version 7 diff --git a/.gitlab/binary_build/serverless.yml b/.gitlab/binary_build/serverless.yml index 8861528211fab..fa626581965be 100644 --- a/.gitlab/binary_build/serverless.yml +++ b/.gitlab/binary_build/serverless.yml @@ -5,7 +5,6 @@ - !reference [.except_mergequeue] - when: on_success before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv check-go-version diff --git a/.gitlab/choco_build/choco_build.yml b/.gitlab/choco_build/choco_build.yml index d14e832b9b15b..359cbf7a90d51 100644 --- a/.gitlab/choco_build/choco_build.yml +++ b/.gitlab/choco_build/choco_build.yml @@ -6,7 +6,7 @@ windows_choco_offline_7_x64: rules: !reference [.manual] - stage: choco_build + stage: choco_and_install_script_build tags: ["runner:windows-docker", "windowsversion:1809"] needs: ["windows_msi_and_bosh_zip_x64-a7"] variables: @@ -28,7 +28,7 @@ windows_choco_offline_7_x64: windows_choco_online_7_x64: rules: !reference [.on_deploy_stable_or_beta_repo_branch_manual] - stage: choco_build + stage: choco_and_install_script_build tags: 
["runner:windows-docker", "windowsversion:1809"] needs: ["deploy_staging_windows_tags-7"] variables: diff --git a/.gitlab/choco_deploy/choco_deploy.yml b/.gitlab/choco_deploy/choco_deploy.yml index 86b63e251e362..56e66603fe9af 100644 --- a/.gitlab/choco_deploy/choco_deploy.yml +++ b/.gitlab/choco_deploy/choco_deploy.yml @@ -4,13 +4,17 @@ publish_choco_7_x64: rules: !reference [.on_deploy_stable_or_beta_repo_branch_manual] - stage: choco_deploy + stage: choco_and_install_script_deploy tags: ["runner:windows-docker", "windowsversion:1809"] needs: ["windows_choco_online_7_x64"] variables: ARCH: "x64" before_script: - - $chocolateyApiKey=$(& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:CHOCOLATEY_API_KEY_SSM_NAME") + - $tmpfile = [System.IO.Path]::GetTempFileName() + - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" -parameterName "$Env:CHOCOLATEY_API_KEY" -tempFile "$tmpfile") + - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } + - $chocolateyApiKey=$(cat "$tmpfile") + - Remove-Item "$tmpfile" script: - '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"' - $ErrorActionPreference = "Stop" diff --git a/.gitlab/common/container_publish_job_templates.yml b/.gitlab/common/container_publish_job_templates.yml index ed119645aa883..19786f15fa81d 100644 --- a/.gitlab/common/container_publish_job_templates.yml +++ b/.gitlab/common/container_publish_job_templates.yml @@ -13,8 +13,7 @@ IMG_VARIABLES: "" IMG_SIGNING: "" script: # We can't use the 'trigger' keyword on manual jobs, otherwise they can't be run if the pipeline fails and is retried - - source /root/.bashrc - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - | if [[ "$BUCKET_BRANCH" == "nightly" && ( "$IMG_SOURCES" =~ "$SRC_AGENT" || "$IMG_SOURCES" =~ "$SRC_DCA" || "$IMG_SOURCES" =~ "$SRC_CWS_INSTRUMENTATION" || "$IMG_VARIABLES" =~ "$SRC_AGENT" || "$IMG_VARIABLES" =~ "$SRC_DCA" || "$IMG_VARIABLES" =~ "$SRC_CWS_INSTRUMENTATION" ) ]]; then export ECR_RELEASE_SUFFIX="-nightly" diff --git a/.gitlab/common/shared.yml b/.gitlab/common/shared.yml index 1df106e9b4c08..b00293abc750e 100644 --- a/.gitlab/common/shared.yml +++ b/.gitlab/common/shared.yml @@ -21,33 +21,37 @@ .setup_deb_signing_key: &setup_deb_signing_key - set +x - - DEB_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_GPG_KEY_SSM_NAME) - - printf -- "${DEB_GPG_KEY}" | gpg --import --batch - - export DEB_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_SIGNING_PASSPHRASE_SSM_NAME) + - printf -- "$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_GPG_KEY)" | gpg --import --batch + - EXIT="${PIPESTATUS[0]}"; if [ $EXIT -ne 0 ]; then echo "Unable to locate credentials needs gitlab runner restart"; exit $EXIT; fi + - DEB_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_SIGNING_PASSPHRASE) || exit $?; export DEB_SIGNING_PASSPHRASE .setup_macos_github_app: # GitHub App rate-limits are per-app. # This balances the requests made to GitHub between the two apps we have set up. 
- | if [[ "$(( RANDOM % 2 ))" == "1" ]]; then - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_SSM_NAME) - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_SSM_NAME) - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_SSM_NAME) + GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_1 key_b64) || exit $?; export GITHUB_KEY_B64 + GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_1 app_id) || exit $?; export GITHUB_APP_ID + GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_1 installation_id) || exit $?; export GITHUB_INSTALLATION_ID echo "Using GitHub App instance 1" else - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_2_SSM_NAME) - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_2_SSM_NAME) - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_2_SSM_NAME) + GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_2 key_b64) || exit $?; export GITHUB_KEY_B64 + GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_2 app_id) || exit $?; export GITHUB_APP_ID + GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_2 installation_id) || exit $?; export GITHUB_INSTALLATION_ID echo "Using GitHub App instance 2" fi .setup_agent_github_app: - - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_GITHUB_KEY_SSM_NAME) - - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_GITHUB_APP_ID_SSM_NAME) - - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_GITHUB_INSTALLATION_ID_SSM_NAME) + - GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_GITHUB_APP key_b64) || exit $?; export GITHUB_KEY_B64 + - GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_GITHUB_APP app_id) || exit $?; export GITHUB_APP_ID + - GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_GITHUB_APP installation_id) || exit $?; export GITHUB_INSTALLATION_ID - echo "Using agent GitHub App" # Install `dd-pkg` and lint packages produced by Omnibus, supports only deb and rpm packages .lint_linux_packages: - curl -sSL "https://dd-package-tools.s3.amazonaws.com/dd-pkg/${DD_PKG_VERSION}/dd-pkg_Linux_${DD_PKG_ARCH}.tar.gz" | tar -xz -C /usr/local/bin dd-pkg - find $OMNIBUS_PACKAGE_DIR -iregex '.*\.\(deb\|rpm\)' | xargs dd-pkg lint + - | + if [ -n "$PACKAGE_REQUIRED_FILES_LIST" ]; then + find $OMNIBUS_PACKAGE_DIR \( -name '*.deb' -or -name '*.rpm' \) -a -not -name '*-dbg[_-]*' | xargs dd-pkg check-files --required-files ${PACKAGE_REQUIRED_FILES_LIST} + fi diff --git a/.gitlab/common/test_infra_version.yml b/.gitlab/common/test_infra_version.yml index 6e55837cfb724..9c368f5003689 100644 --- a/.gitlab/common/test_infra_version.yml +++ b/.gitlab/common/test_infra_version.yml @@ -4,4 +4,4 @@ variables: # and check the job creating the image to make sure you have the right SHA prefix TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX: "" # Make sure to update test-infra-definitions version in go.mod as well - TEST_INFRA_DEFINITIONS_BUILDIMAGES: c9ee795ec752 + TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0e2d625838fc diff --git a/.gitlab/container_build/docker_linux.yml 
b/.gitlab/container_build/docker_linux.yml index d12b6894b57de..c4c0bad88d052 100644 --- a/.gitlab/container_build/docker_linux.yml +++ b/.gitlab/container_build/docker_linux.yml @@ -13,8 +13,9 @@ fi - TARGET_TAG=${IMAGE}${ECR_RELEASE_SUFFIX}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}$TAG_SUFFIX-$ARCH # DockerHub login for build to limit rate limit when pulling base images - - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN_SSM_KEY) - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD_SSM_KEY | docker login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - DOCKER_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_RO user) || exit $? + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_RO token | docker login --username "$DOCKER_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - EXIT="${PIPESTATUS[0]}"; if [ $EXIT -ne 0 ]; then echo "Unable to locate credentials needs gitlab runner restart"; exit $EXIT; fi # Build image, use target none label to avoid replication - docker buildx build --no-cache --push --pull --platform linux/$ARCH --build-arg CIBUILD=true --build-arg GENERAL_ARTIFACTS_CACHE_BUCKET_URL=${GENERAL_ARTIFACTS_CACHE_BUCKET_URL} $BUILD_ARG --build-arg DD_GIT_REPOSITORY_URL=https://github.com/DataDog/datadog-agent --build-arg DD_GIT_COMMIT_SHA=${CI_COMMIT_SHA} --file $BUILD_CONTEXT/Dockerfile --tag ${TARGET_TAG} --label "org.opencontainers.image.created=$(date --rfc-3339=seconds)" --label "org.opencontainers.image.authors=Datadog " --label "org.opencontainers.image.source=https://github.com/DataDog/datadog-agent" --label "org.opencontainers.image.version=$(inv agent.version)" --label "org.opencontainers.image.revision=${CI_COMMIT_SHA}" --label "org.opencontainers.image.vendor=Datadog, Inc." --label "target=none" $BUILD_CONTEXT # Squash image diff --git a/.gitlab/container_build/docker_windows.yml b/.gitlab/container_build/docker_windows.yml index 6d7a365b22ee4..af2a6a84bfde6 100644 --- a/.gitlab/container_build/docker_windows.yml +++ b/.gitlab/container_build/docker_windows.yml @@ -29,8 +29,8 @@ -e SIGN_WINDOWS_DD_WCS=true -e CI_PIPELINE_ID=${CI_PIPELINE_ID} -e CI_PROJECT_NAME=${CI_PROJECT_NAME} - -e DOCKER_REGISTRY_LOGIN_SSM_KEY=${DOCKER_REGISTRY_LOGIN_SSM_KEY} - -e DOCKER_REGISTRY_PWD_SSM_KEY=${DOCKER_REGISTRY_PWD_SSM_KEY} + -e DOCKER_REGISTRY_LOGIN=${DOCKER_REGISTRY_LOGIN} + -e DOCKER_REGISTRY_PWD=${DOCKER_REGISTRY_PWD} -v "$(Get-Location):C:\mnt" -v \\.\pipe\docker_engine:\\.\pipe\docker_engine 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_${Env:VARIANT}_x64${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} powershell diff --git a/.gitlab/container_build/fakeintake.yml b/.gitlab/container_build/fakeintake.yml index 334d6e73e78ca..e0c4ec8c30400 100644 --- a/.gitlab/container_build/fakeintake.yml +++ b/.gitlab/container_build/fakeintake.yml @@ -15,7 +15,8 @@ docker_build_fakeintake: BUILD_CONTEXT: . script: # DockerHub login for build to limit rate limit when pulling base images - - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN_SSM_KEY) - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD_SSM_KEY | docker login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - DOCKER_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_RO user) || exit $? 
+ - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_RO token | docker login --username "$DOCKER_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - EXIT="${PIPESTATUS[0]}"; if [ $EXIT -ne 0 ]; then echo "Unable to locate credentials needs gitlab runner restart"; exit $EXIT; fi - docker buildx build --push --pull --platform ${PLATFORMS} --file ${DOCKERFILE} --tag ${TARGET} $BUILD_CONTEXT retry: 2 diff --git a/.gitlab/container_scan/container_scan.yml b/.gitlab/container_scan/container_scan.yml index cf8cc90908d61..19a9188283576 100644 --- a/.gitlab/container_scan/container_scan.yml +++ b/.gitlab/container_scan/container_scan.yml @@ -15,6 +15,7 @@ scan_nightly-dogstatsd: IMG_REGISTRIES: dev IMG_SOURCES: ${SRC_DSD}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64 IMG_DESTINATIONS: dogstatsd-scan:${BUCKET_BRANCH} + timeout: 15m scan_nightly-a6: extends: .docker_publish_job_definition diff --git a/.gitlab/deploy_containers/deploy_containers_a7.yml b/.gitlab/deploy_containers/deploy_containers_a7.yml index b63cd6cc74972..05eb4b531708c 100644 --- a/.gitlab/deploy_containers/deploy_containers_a7.yml +++ b/.gitlab/deploy_containers/deploy_containers_a7.yml @@ -25,8 +25,7 @@ include: stage: deploy_containers dependencies: [] before_script: - - source /root/.bashrc - - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 7 --url-safe --pipeline-id $PARENT_PIPELINE_ID)"; fi + - if [[ "$VERSION" == "" ]]; then VERSION="$(inv agent.version --major-version 7 --url-safe --pipeline-id $PARENT_PIPELINE_ID)" || exit $?; fi - export IMG_BASE_SRC="${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}" - export IMG_LINUX_SOURCES="${IMG_BASE_SRC}-7${JMX}-amd64,${IMG_BASE_SRC}-7${JMX}-arm64" - export IMG_WINDOWS_SOURCES="${IMG_BASE_SRC}-7${JMX}-win1809${FLAVOR}-amd64,${IMG_BASE_SRC}-7${JMX}-winltsc2022${FLAVOR}-amd64" @@ -46,6 +45,16 @@ include: - "-servercore" - "-linux" +.deploy_containers-a7-base-ot: + extends: .docker_publish_job_definition + stage: deploy_containers + rules: + - when: manual + allow_failure: true + variables: + AGENT_REPOSITORY: agent + IMG_REGISTRIES: public + dependencies: [] deploy_containers-a7: extends: .deploy_containers-a7_external @@ -66,8 +75,7 @@ deploy_containers-dogstatsd: !reference [.manual_on_deploy_auto_on_rc] dependencies: [] before_script: - - source /root/.bashrc - - export VERSION="$(inv agent.version --major-version 7 --url-safe --pipeline-id $PARENT_PIPELINE_ID)" + - VERSION="$(inv agent.version --major-version 7 --url-safe --pipeline-id $PARENT_PIPELINE_ID)" || exit $? 
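The registry login lines in the two hunks above pipe the token from `fetch_secret.sh` straight into `docker login --password-stdin` instead of capturing it in a variable or passing it as an argument. A minimal sketch of why, with a hypothetical `get_token` function and placeholder registry standing in for the real secret source and `$DOCKER_REGISTRY_URL`: with `--password-stdin`, the secret never appears in process arguments, shell history, or `set -x` traces.

```bash
# Hypothetical stand-in for `fetch_secret.sh $DOCKER_REGISTRY_RO token`.
get_token() { printf '%s' "s3cr3t-token"; }

# Avoided: password on the command line is visible in `ps` and trace output.
# docker login --username ci-user --password "$(get_token)" registry.example.com

# Preferred: password travels over stdin only.
get_token | docker login --username ci-user --password-stdin registry.example.com
```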
- export IMG_SOURCES="${SRC_DSD}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64,${SRC_DSD}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-arm64" - export IMG_DESTINATIONS="${DSD_REPOSITORY}:${VERSION}" @@ -88,24 +96,16 @@ deploy_containers-a7_internal-rc: deploy_containers-ot: - extends: .docker_publish_job_definition - stage: deploy_containers - rules: - - when: manual - allow_failure: true - variables: - AGENT_REPOSITORY: agent - IMG_REGISTRIES: public - VERSION: 7 - dependencies: [] + extends: .deploy_containers-a7-base-ot before_script: - - source /root/.bashrc + - if [[ "$VERSION" == "" ]]; then VERSION="$(inv agent.version --major-version 7 --url-safe --pipeline-id $PARENT_PIPELINE_ID)" || exit $?; fi + - export IMG_SOURCES="${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta${JMX}-amd64,${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta${JMX}-arm64" + - export IMG_DESTINATIONS="${AGENT_REPOSITORY}:${VERSION}-ot-beta${JMX}" parallel: matrix: - - IMG_SOURCES: ${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-amd64,${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-arm64 - IMG_DESTINATIONS: ${AGENT_REPOSITORY}:${VERSION}-ot-beta - - IMG_SOURCES: ${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-jmx-amd64,${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-jmx-arm64 - IMG_DESTINATIONS: ${AGENT_REPOSITORY}:${VERSION}-ot-beta-jmx + - JMX: + - "" + - "-jmx" # @@ -133,7 +133,6 @@ deploy_containers_latest-a7: IMG_SOURCES: "%BASE%-win1809-servercore-amd64,%BASE%-winltsc2022-servercore-amd64" IMG_DESTINATIONS: ${AGENT_REPOSITORY}:7-servercore-jmx,${AGENT_REPOSITORY}:latest-servercore-jmx - deploy_containers_latest-a7_internal: extends: .docker_publish_job_definition stage: deploy_containers @@ -146,7 +145,6 @@ deploy_containers_latest-a7_internal: IMG_SOURCES: "%BASE%-amd64,%BASE%-arm64,%BASE%-win1809-amd64,%BASE%-winltsc2022-amd64" IMG_DESTINATIONS: ${AGENT_REPOSITORY}:7-jmx - deploy_containers_latest-dogstatsd: extends: .docker_publish_job_definition stage: deploy_containers @@ -156,3 +154,15 @@ deploy_containers_latest-dogstatsd: variables: IMG_SOURCES: ${SRC_DSD}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64,${SRC_DSD}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-arm64 IMG_DESTINATIONS: ${DSD_REPOSITORY}:7,${DSD_REPOSITORY}:latest + +deploy_containers_latest-ot: + extends: .deploy_containers-a7-base-ot + variables: + VERSION: 7 + parallel: + matrix: + - IMG_SOURCES: ${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-amd64,${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-arm64 + IMG_DESTINATIONS: ${AGENT_REPOSITORY}:${VERSION}-ot-beta + - IMG_SOURCES: ${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-jmx-amd64,${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-jmx-arm64 + IMG_DESTINATIONS: ${AGENT_REPOSITORY}:${VERSION}-ot-beta-jmx + diff --git a/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml b/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml index 47b3566f320a1..330c60bb239ea 100644 --- a/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml +++ b/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml @@ -11,8 +11,7 @@ include: stage: deploy_cws_instrumentation dependencies: [] before_script: - - source /root/.bashrc - - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 7 --url-safe)"; fi + - if [[ 
"$VERSION" == "" ]]; then VERSION="$(inv agent.version --major-version 7 --url-safe)" || exit $?; fi - if [[ "$CWS_INSTRUMENTATION_REPOSITORY" == "" ]]; then export CWS_INSTRUMENTATION_REPOSITORY="cws-instrumentation"; fi - export IMG_BASE_SRC="${SRC_CWS_INSTRUMENTATION}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}" - export IMG_SOURCES="${IMG_BASE_SRC}-amd64,${IMG_BASE_SRC}-arm64" diff --git a/.gitlab/deploy_dca/deploy_dca.yml b/.gitlab/deploy_dca/deploy_dca.yml index 5db81a7297552..63ef1ed56d74c 100644 --- a/.gitlab/deploy_dca/deploy_dca.yml +++ b/.gitlab/deploy_dca/deploy_dca.yml @@ -15,8 +15,7 @@ include: - job: "docker_build_cluster_agent_arm64" artifacts: false before_script: - - source /root/.bashrc - - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 7 --url-safe)"; fi + - if [[ "$VERSION" == "" ]]; then VERSION="$(inv agent.version --major-version 7 --url-safe)" || exit $?; fi - if [[ "$CLUSTER_AGENT_REPOSITORY" == "" ]]; then export CLUSTER_AGENT_REPOSITORY="cluster-agent"; fi - export IMG_BASE_SRC="${SRC_DCA}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}" - export IMG_SOURCES="${IMG_BASE_SRC}-amd64,${IMG_BASE_SRC}-arm64" diff --git a/.gitlab/deploy_packages/nix.yml b/.gitlab/deploy_packages/nix.yml index 4baead2992b04..8a7dd50335f73 100644 --- a/.gitlab/deploy_packages/nix.yml +++ b/.gitlab/deploy_packages/nix.yml @@ -172,7 +172,7 @@ deploy_staging_dsd: needs: ["build_dogstatsd-binary_x64"] script: - $S3_CP_CMD $S3_ARTIFACTS_URI/dogstatsd/dogstatsd ./dogstatsd - - export PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7) + - PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7) || exit $? - $S3_CP_CMD ./dogstatsd $S3_DSD6_URI/linux/dogstatsd-$PACKAGE_VERSION --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732 # deploy iot-agent x64 binary to staging bucket @@ -185,7 +185,7 @@ deploy_staging_iot_agent: needs: ["build_iot_agent-binary_x64"] script: - $S3_CP_CMD $S3_ARTIFACTS_URI/iot/agent ./agent - - export PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7) + - PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7) || exit $? - $S3_CP_CMD ./agent $S3_DSD6_URI/linux/iot/agent-$PACKAGE_VERSION --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732 # Datadog Installer diff --git a/.gitlab/deploy_packages/oci.yml b/.gitlab/deploy_packages/oci.yml index da2dd66b2e11e..b3e39afe86fb1 100644 --- a/.gitlab/deploy_packages/oci.yml +++ b/.gitlab/deploy_packages/oci.yml @@ -15,7 +15,8 @@ include: - ls $OMNIBUS_PACKAGE_DIR script: - set +x - - export VERSION=$(inv agent.version --url-safe)-1 + - !reference [.retrieve_linux_go_tools_deps] + - VERSION="$(inv agent.version --url-safe)-1" || exit $? 
- git config --global url."https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.ddbuild.io/DataDog/".insteadOf "https://github.com/DataDog/" - go env -w GOPRIVATE="github.com/DataDog/*" - export PATH=$PATH:$(go env GOPATH)/bin @@ -28,12 +29,12 @@ include: deploy_agent_oci: extends: ".deploy_packages_oci" - needs: [ "agent_oci" ] + needs: [ "agent_oci", "go_tools_deps"] variables: OCI_PRODUCT: "datadog-agent" deploy_installer_oci: extends: ".deploy_packages_oci" - needs: [ "installer_oci" ] + needs: [ "installer_oci", "go_tools_deps" ] variables: OCI_PRODUCT: "datadog-installer" diff --git a/.gitlab/deploy_packages/windows.yml b/.gitlab/deploy_packages/windows.yml index c119e50f48048..36e2ffe349060 100644 --- a/.gitlab/deploy_packages/windows.yml +++ b/.gitlab/deploy_packages/windows.yml @@ -84,4 +84,5 @@ deploy_installer_packages_windows-x64: --include "datadog-installer-*-1-x86_64.msi" --include "datadog-installer-*-1-x86_64.debug.zip" --include "datadog-installer-*-1-x86_64.zip" + --include "datadog-installer-*-1-x86_64.exe" $OMNIBUS_PACKAGE_DIR $S3_RELEASE_INSTALLER_ARTIFACTS_URI/msi/x86_64/ diff --git a/.gitlab/deploy_packages/winget.yml b/.gitlab/deploy_packages/winget.yml index 0d0cd80b7b981..f28f946b1fb0c 100644 --- a/.gitlab/deploy_packages/winget.yml +++ b/.gitlab/deploy_packages/winget.yml @@ -10,7 +10,11 @@ publish_winget_7_x64: variables: ARCH: "x64" before_script: - - $wingetPat=$(& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" $Env:WINGET_PAT_SSM_NAME) + - $tmpfile = [System.IO.Path]::GetTempFileName() + - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" -parameterName "$Env:WINGET_PAT" -tempFile "$tmpfile") + - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } + - $wingetPat=$(cat "$tmpfile") + - Remove-Item "$tmpfile" script: - '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"' - $ErrorActionPreference = "Stop" diff --git a/.gitlab/deps_fetch/deps_fetch.yml b/.gitlab/deps_fetch/deps_fetch.yml index 617a272ab2cb3..b574ee87529a9 100644 --- a/.gitlab/deps_fetch/deps_fetch.yml +++ b/.gitlab/deps_fetch/deps_fetch.yml @@ -4,15 +4,15 @@ # to reuse them in further jobs that need them. .retrieve_linux_go_deps: - - mkdir -p $GOPATH/pkg/mod && tar xJf modcache.tar.xz -C $GOPATH/pkg/mod + - mkdir -p $GOPATH/pkg/mod/cache && tar xJf modcache.tar.xz -C $GOPATH/pkg/mod/cache - rm -f modcache.tar.xz .retrieve_linux_go_tools_deps: - - mkdir -p $GOPATH/pkg/mod && tar xJf modcache_tools.tar.xz -C $GOPATH/pkg/mod + - mkdir -p $GOPATH/pkg/mod/cache && tar xJf modcache_tools.tar.xz -C $GOPATH/pkg/mod/cache - rm -f modcache_tools.tar.xz .retrieve_linux_go_e2e_deps: - - mkdir -p $GOPATH/pkg/mod && tar xJf modcache_e2e.tar.xz -C $GOPATH/pkg/mod + - mkdir -p $GOPATH/pkg/mod/cache && tar xJf modcache_e2e.tar.xz -C $GOPATH/pkg/mod/cache - rm -f modcache_e2e.tar.xz .cache: @@ -40,29 +40,39 @@ go_deps: # If the cache already contains the dependencies, don't redownload them # but still provide the artifact that's expected for the other jobs to run - if [ -f modcache.tar.xz ]; then exit 0; fi - - source /root/.bashrc - inv -e deps --verbose - - cd $GOPATH/pkg/mod/ && tar c -I "pxz -T${KUBERNETES_CPU_REQUEST}" -f $CI_PROJECT_DIR/modcache.tar.xz . + - inv -e install-tools + - cd $GOPATH/pkg/mod/cache/ && tar c -I "pxz -T${KUBERNETES_CPU_REQUEST}" -f $CI_PROJECT_DIR/modcache.tar.xz . 
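The dependency jobs in this hunk now archive only `$GOPATH/pkg/mod/cache` (the module download cache) and extract it back into the same subdirectory, instead of tarring all of `pkg/mod`. A hedged sketch of the round trip; `pxz` and `KUBERNETES_CPU_REQUEST` come from the CI environment, and the fallback values are assumptions for running outside CI:

```bash
set -euo pipefail
GOPATH="$(go env GOPATH)"
OUT="${CI_PROJECT_DIR:-$PWD}/modcache.tar.xz"

# Pack (producer job): archive only the download cache that `go mod download` fills.
( cd "$GOPATH/pkg/mod/cache" && tar c -I "pxz -T${KUBERNETES_CPU_REQUEST:-4}" -f "$OUT" . )

# Unpack (consumer job): recreate the cache directory, then extract into it.
mkdir -p "$GOPATH/pkg/mod/cache"
tar xJf "$OUT" -C "$GOPATH/pkg/mod/cache"
rm -f "$OUT"
```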
artifacts: expire_in: 1 day paths: - $CI_PROJECT_DIR/modcache.tar.xz cache: + # The `cache:key:files` only accepts up to two path ([docs](https://docs.gitlab.com/ee/ci/yaml/#cachekeyfiles)). + # Ideally, we should also include the https://github.com/DataDog/datadog-agent/blob/main/.custom-gcl.yml file to + # avoid issues if a plugin is added in one PR and enabled in another. However, we decided to accept this limitation + # because the probability for this to happen is very low and go mod files are modified frequently so the risk of + # failing a job because of a network issue when building the custom binary is very low, but still exists. - key: files: - - go.mod - - ./**/go.mod - prefix: "go_deps" + - \**/go.mod + - .gitlab/deps_fetch/deps_fetch.yml + prefix: "go_deps_modcache" paths: - modcache.tar.xz + timeout: 35m go_tools_deps: extends: .cache script: - if [ -f modcache_tools.tar.xz ]; then exit 0; fi - - source /root/.bashrc - - inv -e download-tools - - cd $GOPATH/pkg/mod/ && tar c -I "pxz -T${KUBERNETES_CPU_REQUEST}" -f $CI_PROJECT_DIR/modcache_tools.tar.xz . + - inv -e install-tools + # Partial mitigation for #incident-30779. It will cache datadog-packages , but if the upstream version is updated without the key cache changing, the cache will not work until the key cache changes. + # Long term solution will be to provide the datadog-packages as a binary hosted internally + - git config --global url."https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.ddbuild.io/DataDog/".insteadOf "https://github.com/DataDog/" + - go env -w GOPRIVATE="github.com/DataDog/*" + - go install github.com/DataDog/datadog-packages/cmd/datadog-package@latest + - cd $GOPATH/pkg/mod/cache/ && tar c -I "pxz -T${KUBERNETES_CPU_REQUEST}" -f $CI_PROJECT_DIR/modcache_tools.tar.xz . artifacts: expire_in: 1 day paths: @@ -71,7 +81,8 @@ go_tools_deps: - key: files: - ./**/go.mod - prefix: "go_tools_deps" + - .gitlab/deps_fetch/deps_fetch.yml + prefix: "go_tools_deps_modcache" paths: - modcache_tools.tar.xz @@ -79,9 +90,8 @@ go_e2e_deps: extends: .cache script: - if [ -f modcache_e2e.tar.xz ]; then exit 0; fi - - source /root/.bashrc - inv -e new-e2e-tests.deps - - cd $GOPATH/pkg/mod/ && tar c -I "pxz -T${KUBERNETES_CPU_REQUEST}" -f $CI_PROJECT_DIR/modcache_e2e.tar.xz . + - cd $GOPATH/pkg/mod/cache/ && tar c -I "pxz -T${KUBERNETES_CPU_REQUEST}" -f $CI_PROJECT_DIR/modcache_e2e.tar.xz . artifacts: expire_in: 1 day paths: @@ -90,6 +100,7 @@ go_e2e_deps: - key: files: - ./test/new-e2e/go.mod - prefix: "go_e2e_deps" + - .gitlab/deps_fetch/deps_fetch.yml + prefix: "go_e2e_deps_modcache" paths: - modcache_e2e.tar.xz diff --git a/.gitlab/e2e/e2e.yml b/.gitlab/e2e/e2e.yml index 257e8d255a35b..10088c7490261 100644 --- a/.gitlab/e2e/e2e.yml +++ b/.gitlab/e2e/e2e.yml @@ -1,7 +1,6 @@ --- # e2e stage # Contains test jobs based on the new-e2e tests framework - .new_e2e_template: stage: e2e image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/test-infra-definitions/runner$TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX:$TEST_INFRA_DEFINITIONS_BUILDIMAGES @@ -12,19 +11,22 @@ - !reference [.retrieve_linux_go_e2e_deps] # Setup AWS Credentials - mkdir -p ~/.aws - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_QA_PROFILE_SSM_NAME >> ~/.aws/config + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config || exit $? 
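The last line above appends a named AWS profile fetched from secrets to `~/.aws/config`; the line that follows selects it via `AWS_PROFILE` so every subsequent `aws` and `pulumi` call uses it. A sketch of the same mechanism with a hard-coded stand-in profile body (the real content comes from `fetch_secret.sh $AGENT_QA_PROFILE`):

```bash
mkdir -p ~/.aws

# Stand-in for the profile text returned by fetch_secret.sh; only the profile
# name matters here, the body is illustrative.
cat >> ~/.aws/config <<'EOF'
[profile agent-qa-ci]
region = us-east-1
output = json
EOF

export AWS_PROFILE=agent-qa-ci   # aws/pulumi now resolve credentials via this profile
aws configure list               # shows which profile is currently in effect
```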
- export AWS_PROFILE=agent-qa-ci # Now all `aws` commands target the agent-qa profile - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SSH_PUBLIC_KEY_RSA_SSM_NAME > $E2E_PUBLIC_KEY_PATH - - touch $E2E_PRIVATE_KEY_PATH && chmod 600 $E2E_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SSH_KEY_RSA_SSM_NAME > $E2E_PRIVATE_KEY_PATH + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_PUBLIC_KEY_RSA > $E2E_PUBLIC_KEY_PATH || exit $? + - touch $E2E_PRIVATE_KEY_PATH && chmod 600 $E2E_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_KEY_RSA > $E2E_PRIVATE_KEY_PATH || exit $? # Use S3 backend - pulumi login "s3://dd-pulumi-state?region=us-east-1&awssdk=v2&profile=$AWS_PROFILE" # Setup Azure credentials. https://www.pulumi.com/registry/packages/azure-native/installation-configuration/#set-configuration-using-pulumi-config # The app is called `agent-e2e-tests` - - export ARM_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_AZURE_CLIENT_ID) - - export ARM_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_AZURE_CLIENT_SECRET) - - export ARM_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_AZURE_TENANT_ID) - - export ARM_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_AZURE_SUBSCRIPTION_ID) + - ARM_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_CLIENT_ID) || exit $?; export ARM_CLIENT_ID + - ARM_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_CLIENT_SECRET) || exit $?; export ARM_CLIENT_SECRET + - ARM_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_TENANT_ID) || exit $?; export ARM_TENANT_ID + - ARM_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_SUBSCRIPTION_ID) || exit $?; export ARM_SUBSCRIPTION_ID + # Setup GCP credentials. 
https://www.pulumi.com/registry/packages/gcp/installation-configuration/ + # The service account is called `agent-e2e-tests` + - GOOGLE_CREDENTIALS=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_GCP_CREDENTIALS) || exit $?; export GOOGLE_CREDENTIALS # Generate external links to CI VISIBILITY, used by artifacts:reports:annotations - inv -e gitlab.generate-ci-visibility-links --output=$EXTERNAL_LINKS_PATH variables: @@ -137,7 +139,7 @@ new-e2e-agent-subcommands: - EXTRA_PARAMS: --run "Test(Linux|Windows)StatusSuite" - EXTRA_PARAMS: --run "Test(Linux|Windows)HealthSuite" - EXTRA_PARAMS: --run "Test(Linux|Windows)ConfigSuite" - - EXTRA_PARAMS: --run "Test(Linux|Windows)HostnameSuite" + - EXTRA_PARAMS: --run "Test(Linux|Windows|LinuxAzure)HostnameSuite" - EXTRA_PARAMS: --run "Test(Linux|Windows)DiagnoseSuite" - EXTRA_PARAMS: --run "Test(Linux|Windows)ConfigCheckSuite" - EXTRA_PARAMS: --run "Test(Linux|Windows)FlareSuite" @@ -158,6 +160,20 @@ new-e2e-windows-service-test: TEAM: windows-agent EXTRA_PARAMS: --run TestServiceBehavior +# Temporary job for hunting a crash +new-e2e-windows-service-test-nofim: + extends: .new_e2e_template + needs: + - !reference [.needs_new_e2e_template] + - deploy_windows_testing-a7 + rules: + - !reference [.on_windows_service_or_e2e_changes] + - !reference [.manual] + variables: + TARGETS: ./tests/windows/service-test + TEAM: windows-agent + EXTRA_PARAMS: --run TestNoFIMServiceBehavior + new-e2e-language-detection: extends: .new_e2e_template_needs_deb_x64 rules: @@ -272,6 +288,7 @@ new-e2e-orchestrator: variables: TARGETS: ./tests/orchestrator TEAM: container-app + timeout: 55m new-e2e-apm: extends: .new_e2e_template @@ -305,6 +322,20 @@ new-e2e-installer: - deploy_rpm_testing-a7_x64 - deploy_suse_rpm_testing_arm64-a7 - deploy_suse_rpm_testing_x64-a7 + - qa_installer_oci + - qa_agent_oci + variables: + TARGETS: ./tests/installer/unix + TEAM: fleet + FLEET_INSTALL_METHOD: "install_script" + +new-e2e-installer-windows: + extends: .new_e2e_template + rules: + - !reference [.on_installer_or_e2e_changes] + - !reference [.manual] + needs: + - !reference [.needs_new_e2e_template] - deploy_windows_testing-a7 - qa_installer_oci - qa_agent_oci @@ -313,14 +344,14 @@ new-e2e-installer: # Must run before new_e2e_template changes the aws profile # Note: this is similar to the WINDOWS_AGENT_VERSION in new-e2e_windows_msi but this job is running cross platforms # Note 2: new_e2e_template does not define AGENT_MAJOR_VERSION, so define it as 7 below. 
- - export CURRENT_AGENT_VERSION=$(invoke agent.version --major-version 7) + - CURRENT_AGENT_VERSION=$(invoke agent.version --major-version 7) || exit $?; export CURRENT_AGENT_VERSION - export STABLE_AGENT_VERSION_PACKAGE=$(curl -sS https://hub.docker.com/v2/namespaces/datadog/repositories/agent-package/tags | jq -r '.results[] | .name' | sort | tail -n 2 | head -n 1) - export STABLE_INSTALLER_VERSION_PACKAGE=$(curl -sS https://hub.docker.com/v2/namespaces/datadog/repositories/installer-package/tags | jq -r '.results[] | .name' | sort | tail -n 2 | head -n 1) - !reference [.new_e2e_template, before_script] variables: - TARGETS: ./tests/installer + TARGETS: ./tests/installer/windows TEAM: fleet - FLEET_INSTALL_METHOD: "install_script" + FLEET_INSTALL_METHOD: "windows" new-e2e-installer-ansible: extends: .new_e2e_template @@ -329,18 +360,16 @@ new-e2e-installer-ansible: - !reference [.manual] needs: - !reference [.needs_new_e2e_template] - - new-e2e-installer - before_script: - # CURRENT_AGENT_VERSION is used to verify the installed agent version - # Must run before new_e2e_template changes the aws profile - # Note: this is similar to the WINDOWS_AGENT_VERSION in new-e2e_windows_msi but this job is running cross platforms - # Note 2: new_e2e_template does not define AGENT_MAJOR_VERSION, so define it as 7 below. - - export CURRENT_AGENT_VERSION=$(invoke agent.version --major-version 7) - - export STABLE_AGENT_VERSION_PACKAGE=$(curl -sS https://hub.docker.com/v2/namespaces/datadog/repositories/agent-package/tags | jq -r '.results[] | .name' | sort | tail -n 2 | head -n 1) - - export STABLE_INSTALLER_VERSION_PACKAGE=$(curl -sS https://hub.docker.com/v2/namespaces/datadog/repositories/installer-package/tags | jq -r '.results[] | .name' | sort | tail -n 2 | head -n 1) - - !reference [.new_e2e_template, before_script] + - deploy_deb_testing-a7_arm64 + - deploy_deb_testing-a7_x64 + - deploy_rpm_testing-a7_arm64 + - deploy_rpm_testing-a7_x64 + - deploy_suse_rpm_testing_arm64-a7 + - deploy_suse_rpm_testing_x64-a7 + - qa_installer_oci + - qa_agent_oci variables: - TARGETS: ./tests/installer + TARGETS: ./tests/installer/unix TEAM: fleet FLEET_INSTALL_METHOD: "ansible" @@ -435,10 +464,27 @@ new-e2e-package-signing-suse-a7-x86_64: - .new-e2e_package_signing rules: !reference [.on_default_new_e2e_tests] +new-e2e-cspm: + extends: .new_e2e_template + rules: + - !reference [.on_cspm_or_e2e_changes] + - !reference [.manual] + needs: + - !reference [.needs_new_e2e_template] + - qa_agent + - qa_dca + variables: + TARGETS: ./tests/cspm + TEAM: cspm + timeout: 35m + generate-flakes-finder-pipeline: image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES stage: e2e - rules: !reference [.on_deploy_nightly_repo_branch] + rules: + - !reference [.except_disable_e2e_tests] + - !reference [.on_deploy_nightly_repo_branch] + - !reference [.manual] needs: - deploy_deb_testing-a7_arm64 - deploy_deb_testing-a7_x64 @@ -453,9 +499,10 @@ generate-flakes-finder-pipeline: - qa_dca - qa_dogstatsd - qa_agent + - qa_agent_ot tags: ["arch:amd64"] script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN_SSM_NAME) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) || exit $?; export GITLAB_TOKEN - inv -e testwasher.generate-flake-finder-pipeline artifacts: paths: @@ -464,7 +511,10 @@ generate-flakes-finder-pipeline: trigger-flakes-finder: stage: e2e needs: 
[generate-flakes-finder-pipeline] - rules: !reference [.on_deploy_nightly_repo_branch] + rules: + - !reference [.except_disable_e2e_tests] + - !reference [.on_deploy_nightly_repo_branch] + - !reference [.manual] variables: PARENT_PIPELINE_ID: $CI_PIPELINE_ID PARENT_COMMIT_SHA: $CI_COMMIT_SHORT_SHA diff --git a/.gitlab/e2e_install_packages/common.yml b/.gitlab/e2e_install_packages/common.yml index 90777eb5ec269..4684fafc3cd9d 100644 --- a/.gitlab/e2e_install_packages/common.yml +++ b/.gitlab/e2e_install_packages/common.yml @@ -2,6 +2,7 @@ rules: !reference [.on_kitchen_tests] #TODO: Change when migration is complete to another name without 'kitchen' variables: AGENT_MAJOR_VERSION: 6 + SHOULD_RUN_IN_FLAKES_FINDER: "false" .new-e2e_agent_a7: rules: !reference [.on_kitchen_tests] #TODO: Change when migration is complete to another name without 'kitchen' @@ -33,7 +34,7 @@ - START_MAJOR_VERSION: [5, 6] END_MAJOR_VERSION: [6] script: - - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $INSTALL_SCRIPT_API_KEY_SSM_NAME) + - DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $INSTALL_SCRIPT_API_KEY) || exit $?; export DATADOG_AGENT_API_KEY - inv -e new-e2e-tests.run --targets $TARGETS --junit-tar "junit-${CI_JOB_ID}.tgz" ${EXTRA_PARAMS} --src-agent-version $START_MAJOR_VERSION --dest-agent-version $END_MAJOR_VERSION .new-e2e_script_upgrade7: @@ -47,7 +48,7 @@ - START_MAJOR_VERSION: [5, 6, 7] END_MAJOR_VERSION: [7] script: - - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $INSTALL_SCRIPT_API_KEY_SSM_NAME ) + - DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $INSTALL_SCRIPT_API_KEY ) || exit $?; export DATADOG_AGENT_API_KEY - inv -e new-e2e-tests.run --targets $TARGETS --junit-tar "junit-${CI_JOB_ID}.tgz" ${EXTRA_PARAMS} --src-agent-version $START_MAJOR_VERSION --dest-agent-version $END_MAJOR_VERSION .new-e2e_rpm: @@ -57,5 +58,5 @@ TEAM: agent-delivery EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --arch $E2E_ARCH script: - - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $INSTALL_SCRIPT_API_KEY_SSM_NAME) + - DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $INSTALL_SCRIPT_API_KEY) || exit $?; export DATADOG_AGENT_API_KEY - inv -e new-e2e-tests.run --targets $TARGETS --junit-tar "junit-${CI_JOB_ID}.tgz" ${EXTRA_PARAMS} diff --git a/.gitlab/e2e_install_packages/windows.yml b/.gitlab/e2e_install_packages/windows.yml index 70603614abca2..43b8a49cfe105 100644 --- a/.gitlab/e2e_install_packages/windows.yml +++ b/.gitlab/e2e_install_packages/windows.yml @@ -10,7 +10,7 @@ before_script: # WINDOWS_AGENT_VERSION is used to verify the installed agent version # Must run before new_e2e_template changes the aws profile - - export WINDOWS_AGENT_VERSION=$(invoke agent.version --major-version $AGENT_MAJOR_VERSION) + - WINDOWS_AGENT_VERSION=$(invoke agent.version --major-version $AGENT_MAJOR_VERSION) || exit $?; export WINDOWS_AGENT_VERSION - !reference [.new_e2e_template, before_script] script: # LAST_STABLE_VERSION is used for upgrade test @@ -24,7 +24,7 @@ before_script: # WINDOWS_AGENT_VERSION is used to verify the installed agent version # Must run before new_e2e_template changes the aws profile - - export WINDOWS_AGENT_VERSION=$(invoke agent.version --major-version $AGENT_MAJOR_VERSION) + - WINDOWS_AGENT_VERSION=$(invoke agent.version --major-version $AGENT_MAJOR_VERSION) || exit $?; export WINDOWS_AGENT_VERSION - !reference [.new_e2e_template, 
before_script] script: # LAST_STABLE_VERSION is used for upgrade test diff --git a/.gitlab/e2e_k8s/e2e_k8s.yml b/.gitlab/e2e_k8s/e2e_k8s.yml deleted file mode 100644 index 2c3faca866165..0000000000000 --- a/.gitlab/e2e_k8s/e2e_k8s.yml +++ /dev/null @@ -1,82 +0,0 @@ - ---- -# e2e stage -# Jobs with the k8s_e2e template - -.k8s_e2e_template: - stage: e2e_k8s - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/gitlab_agent_deploy$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES - tags: ["arch:amd64"] - dependencies: [] - variables: - LANG: C.UTF-8 - before_script: - - export DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN_SSM_KEY) - - export DOCKER_REGISTRY_PWD=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD_SSM_KEY) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_DDDEV_SSM_NAME) - -.k8s-e2e-cws-cspm-init: - - set +x - - export DATADOG_AGENT_SITE=datadoghq.com - - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_API_KEY_SSM_NAME) - - export DATADOG_AGENT_APP_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_APP_KEY_SSM_NAME) - - export DATADOG_AGENT_RC_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $E2E_TESTS_RC_KEY_SSM_NAME) - -.k8s_e2e_template_needs_dev: - extends: .k8s_e2e_template - needs: - - dev_branch_multiarch-a7 - - dca_dev_branch - -.k8s_e2e_template_dev: - extends: .k8s_e2e_template_needs_dev - script: - - inv -e e2e-tests --agent-image=datadog/agent-dev:${CI_COMMIT_REF_SLUG}-py3 --dca-image=datadog/cluster-agent-dev:${CI_COMMIT_REF_SLUG} --argo-workflow=$ARGO_WORKFLOW - -.k8s_e2e_template_dev_with_cws_cspm_init: - extends: .k8s_e2e_template_needs_dev - script: - - !reference [.k8s-e2e-cws-cspm-init] - - inv -e e2e-tests --agent-image=datadog/agent-dev:${CI_COMMIT_REF_SLUG}-py3 --dca-image=datadog/cluster-agent-dev:${CI_COMMIT_REF_SLUG} --argo-workflow=$ARGO_WORKFLOW - -.k8s_e2e_template_needs_main: - extends: .k8s_e2e_template - needs: - - dev_master-a7 - - dca_dev_master - -.k8s_e2e_template_main_with_cws_cspm_init: - extends: .k8s_e2e_template_needs_main - script: - - !reference [.k8s-e2e-cws-cspm-init] - - inv -e e2e-tests --agent-image=datadog/agent-dev:master-py3 --dca-image=datadog/cluster-agent-dev:master --argo-workflow=$ARGO_WORKFLOW - -.k8s_e2e_template_main: - extends: .k8s_e2e_template_needs_main - script: - - inv -e e2e-tests --agent-image=datadog/agent-dev:master-py3 --dca-image=datadog/cluster-agent-dev:master --argo-workflow=$ARGO_WORKFLOW - -k8s-e2e-cspm-dev: - extends: .k8s_e2e_template_dev_with_cws_cspm_init - rules: !reference [.on_dev_branch_manual] - variables: - ARGO_WORKFLOW: cspm - -k8s-e2e-cspm-main: - extends: .k8s_e2e_template_main_with_cws_cspm_init - rules: !reference [.on_main] - retry: 1 - variables: - ARGO_WORKFLOW: cspm - -k8s-e2e-otlp-dev: - extends: .k8s_e2e_template_dev - rules: !reference [.on_dev_branch_manual] - variables: - ARGO_WORKFLOW: otlp - -k8s-e2e-otlp-main: - extends: .k8s_e2e_template_main - rules: !reference [.on_main] - variables: - ARGO_WORKFLOW: otlp diff --git a/.gitlab/functional_test/oracle.yml b/.gitlab/functional_test/oracle.yml index e170254b42bb5..f1c89e36bf3cc 100644 --- a/.gitlab/functional_test/oracle.yml +++ b/.gitlab/functional_test/oracle.yml @@ -17,7 +17,6 @@ oracle: matrix: - DBMS_VERSION: "21.3.0-xe" before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv oracle.test \ 
No newline at end of file diff --git a/.gitlab/functional_test/regression_detector.yml b/.gitlab/functional_test/regression_detector.yml index 3819ff4626d4d..e8da5017e5d77 100644 --- a/.gitlab/functional_test/regression_detector.yml +++ b/.gitlab/functional_test/regression_detector.yml @@ -1,5 +1,6 @@ single-machine-performance-regression_detector: stage: functional_test + timeout: 1h10 rules: - !reference [.except_main_or_release_branch] - when: on_success @@ -14,7 +15,11 @@ single-machine-performance-regression_detector: - submission_metadata # for provenance, debugging - ${CI_COMMIT_SHA}-baseline_sha # for provenance, debugging - outputs/report.md # for debugging, also on S3 - - outputs/report.html # for debugging, also on S3 + - outputs/regression_signal.json # for debugging, also on S3 + - outputs/bounds_check_signal.json # for debugging, also on S3 + - outputs/junit.xml # for debugging, also on S3 + - report_as_json_string.txt # for debugging transform to valid JSON string + - pr_comment_payload.json # for debugging PR commenter JSON payload bugs when: always variables: SMP_VERSION: 0.16.0 @@ -33,7 +38,6 @@ single-machine-performance-regression_detector: # Ensure output files exist for artifact downloads step - mkdir outputs # Also needed for smp job sync step - touch outputs/report.md # Will be emitted by smp job sync - - touch outputs/report.html # Will be emitted by smp job sync # Compute merge base of current commit and `main` - git fetch origin - SMP_BASE_BRANCH=$(inv release.get-release-json-value base_branch) @@ -42,20 +46,52 @@ single-machine-performance-regression_detector: - echo "Merge base is ${SMP_MERGE_BASE}" # Setup AWS credentials for single-machine-performance AWS account - AWS_NAMED_PROFILE="single-machine-performance" - - SMP_ACCOUNT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_ACCOUNT_ID_SSM_NAME) + - SMP_ACCOUNT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_ACCOUNT_ID) || exit $? - SMP_ECR_URL=${SMP_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com - - SMP_AGENT_TEAM_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_AGENT_TEAM_ID_SSM_NAME) - - SMP_API=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_API_SSM_NAME) - - aws configure set aws_access_key_id $($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_BOT_ACCESS_KEY_ID_SSM_NAME) --profile ${AWS_NAMED_PROFILE} - - aws configure set aws_secret_access_key $($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SMP_BOT_ACCESS_KEY_SSM_NAME) --profile ${AWS_NAMED_PROFILE} + - SMP_AGENT_TEAM_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_AGENT_TEAM_ID) || exit $? + - SMP_API=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_API) || exit $? + - SMP_BOT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_BOT_ACCESS_KEY_ID) || exit $? + - SMP_BOT_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_BOT_ACCESS_KEY) || exit $? + - aws configure set aws_access_key_id "$SMP_BOT_ID" --profile ${AWS_NAMED_PROFILE} + - aws configure set aws_secret_access_key "$SMP_BOT_KEY" --profile ${AWS_NAMED_PROFILE} - aws configure set region us-west-2 --profile ${AWS_NAMED_PROFILE} # Download smp binary and prepare it for use - aws --profile single-machine-performance s3 cp s3://smp-cli-releases/v${SMP_VERSION}/x86_64-unknown-linux-gnu/smp smp - chmod +x smp - BASELINE_SHA="${SMP_MERGE_BASE}" - echo "Computing baseline..." + - echo "Checking if commit ${BASELINE_SHA} is recent enough..." 
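The regression-detector hunk that continues below guards against SMP's ECR image-expiration policy by refusing baseline commits older than roughly four days (with an hour of slack). A standalone sketch of that age check; `git show --format=%ct` prints the committer time as a UNIX timestamp, and the `HEAD` fallback is an assumption (in CI the SHA is the merge-base with `main`):

```bash
BASELINE_SHA="${BASELINE_SHA:-HEAD}"

# Cutoff: four days ago plus one hour of slack, as a UNIX timestamp.
FOUR_DAYS_BEFORE_NOW=$(date --date="-4 days +1 hour" "+%s")

# Committer timestamp of the candidate baseline commit.
BASELINE_COMMIT_TIME=$(git -c log.showSignature=false show --no-patch --format=%ct "$BASELINE_SHA")

if [[ ${BASELINE_COMMIT_TIME} -le ${FOUR_DAYS_BEFORE_NOW} ]]; then
  echo "baseline ${BASELINE_SHA} is too old for SMP" >&2
  exit 1
fi
echo "baseline ${BASELINE_SHA} is recent enough"
```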
+ # Compute four days before now as UNIX timestamp in order to test against SMP ECR expiration policy; + # add an hour as a small correction factor to overestimate time needed for SMP to query and pull the + # image so we don't end up with a hard-to-diagnose bug in which the image expires after checking its + # age in CI, but before SMP pulls the image. + - FOUR_DAYS_BEFORE_NOW=$(date --date="-4 days +1 hour" "+%s") + # Compute UNIX timestamp of potential baseline SHA + - BASELINE_COMMIT_TIME=$(git -c log.showSignature=false show --no-patch --format=%ct ${BASELINE_SHA}) + # If baseline SHA is older than expiration policy, exit with an error + - | # Only 1st line of multiline command echoes, which reduces debuggability, so multiline commands are a maintenance tradeoff + if [[ ${BASELINE_COMMIT_TIME} -le ${FOUR_DAYS_BEFORE_NOW} ]] + then + echo "ERROR: Merge-base of this branch is too old for SMP. Please update your branch by merging an up-to-date main branch into your branch or by rebasing it on an up-to-date main branch." + exit 1 + fi + - echo "Commit ${BASELINE_SHA} is recent enough" - echo "Checking if image exists for commit ${BASELINE_SHA}..." - - while [[ ! $(aws ecr describe-images --profile single-machine-performance --registry-id "${SMP_ACCOUNT_ID}" --repository-name "${SMP_AGENT_TEAM_ID}-agent" --image-ids imageTag="${BASELINE_SHA}-7-amd64") ]]; do echo "No image exists for ${BASELINE_SHA} - checking predecessor of ${BASELINE_SHA} next"; BASELINE_SHA=$(git rev-parse ${BASELINE_SHA}^); echo "Checking if image exists for commit ${BASELINE_SHA}..."; done + - | + while [[ ! $(aws ecr describe-images --profile single-machine-performance --registry-id "${SMP_ACCOUNT_ID}" --repository-name "${SMP_AGENT_TEAM_ID}-agent" --image-ids imageTag="${BASELINE_SHA}-7-amd64") ]] + do + echo "No image exists for ${BASELINE_SHA} - checking predecessor of ${BASELINE_SHA} next" + BASELINE_SHA=$(git rev-parse ${BASELINE_SHA}^) + echo "Checking if commit ${BASELINE_SHA} is recent enough..." + BASELINE_COMMIT_TIME=$(git -c log.showSignature=false show --no-patch --format=%ct ${BASELINE_SHA}) + if [[ ${BASELINE_COMMIT_TIME} -le ${FOUR_DAYS_BEFORE_NOW} ]] + then + echo "ERROR: Merge-base of this branch is too old for SMP. Please update your branch by merging an up-to-date main branch into your branch or by rebasing it on an up-to-date main branch." + exit 1 + fi + echo "Commit ${BASELINE_SHA} is recent enough" + echo "Checking if image exists for commit ${BASELINE_SHA}..." + done - echo "Image exists for commit ${BASELINE_SHA}" - echo "Baseline SHA is ${BASELINE_SHA}" - echo -n "${BASELINE_SHA}" > "${CI_COMMIT_SHA}-baseline_sha" @@ -90,9 +126,44 @@ single-machine-performance-regression_detector: # space characters. This avoids # https://gitlab.com/gitlab-org/gitlab/-/issues/217231. - cat outputs/report.md | sed "s/^\$/$(echo -ne '\uFEFF\u00A0\u200B')/g" - - !reference [.install_pr_commenter] - # Post HTML report to GitHub - - cat outputs/report.md | /usr/local/bin/pr-commenter --for-pr="$CI_COMMIT_REF_NAME" --header="Regression Detector" + # Download auth tool; see + # https://github.com/DataDog/dd-source/tree/8f80b7ef031839b0b11b4a70b9067d6142f3dd5b/domains/devex/ci/authanywhere + - curl -Lo ./authanywhere binaries.ddbuild.io/dd-source/authanywhere/LATEST/authanywhere-linux-amd64 + - chmod u+x ./authanywhere + # We need to transform the Markdown report into a valid JSON string (without + # quotes) in order to pass a well-formed payload to the PR commenting + # service. 
Note that on macOS, the "-z" flag is invalid for `sed` (but + # should be fine for GNU `sed`). We need to use `sed` to escape newlines + # because JSON does not permit (raw) newlines in strings. We use the "-z" + # option with `sed` because that option treats its input as + # NUL-character-separated (i.e., '\0'-separated, the zero-byte character), + # so `sed` does not interpret its input as newline-delimited. We also need + # to escape double quotes to distinguish literal quotes in the report from + # the double quotes that delimit the value of the "message" field in the + # payload. + - cat outputs/report.md | sed -z 's/\n/\\n/g' | sed -z 's/"/\\"/g' > report_as_json_string.txt + - cat report_as_json_string.txt + # Transforming the Markdown report to a valid JSON string is easy to foul + # up, so to make debugging easier, we store the payload in a variable to + # help debugging. + - PR_COMMENT_JSON_PAYLOAD='{"org":"DataDog", "repo":"datadog-agent", "commit":"'"${CI_COMMIT_SHA}"'", "header":"Regression Detector", "message":"'"$(cat report_as_json_string.txt)"'"}' + - printf "%s\n" "PR comment JSON payload:${PR_COMMENT_JSON_PAYLOAD}" + - printf "%s\n" "${PR_COMMENT_JSON_PAYLOAD}" > pr_comment_payload.json + # Craft an HTTPS request to pr-commenter service to post Markdown report to + # GitHub, per + # https://github.com/DataDog/dd-source/tree/7c941f527fb9c44a73433c7dd0a090d92be7deb4/domains/devex/codex/apps/apis/pr-commenter + - | + curl https://pr-commenter.us1.ddbuild.io/internal/cit/pr-comment \ + -H "$(./authanywhere)" \ + -H "X-DdOrigin: curl" \ + -X PATCH \ + -d "${PR_COMMENT_JSON_PAYLOAD}" + # Upload JUnit XML outside of Agent CI's tooling because the `junit_upload` + # invoke task has additional logic that does not seem to apply well to SMP's + # JUnit XML. Agent CI seems to use `datadog-agent` as the service name when + # uploading JUnit XML, so the upload command below respects that convention. + - DATADOG_API_KEY="$("$CI_PROJECT_DIR"/tools/ci/fetch_secret.sh "$API_KEY_ORG2")" || exit $?; export DATADOG_API_KEY + - datadog-ci junit upload --service datadog-agent outputs/junit.xml # Finally, exit 1 if the job signals a regression else 0. 
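The comment block above describes turning the Markdown report into a legal JSON string value: newlines and double quotes must be escaped before the report can be embedded in the PR-commenter payload. A minimal sketch of that transformation using GNU `sed` (the `-z` flag treats input as NUL-delimited, so the substitution can rewrite newlines) and a placeholder report file in place of `outputs/report.md`:

```bash
printf 'line one\nhe said "hi"\n' > report.md   # placeholder report

# Escape raw newlines, then double quotes, so the text is a valid JSON string body.
ESCAPED=$(sed -z 's/\n/\\n/g' report.md | sed -z 's/"/\\"/g')

# Embed it in the payload that the job PATCHes to the pr-commenter service.
PAYLOAD='{"org":"DataDog","repo":"datadog-agent","header":"Regression Detector","message":"'"${ESCAPED}"'"}'
echo "$PAYLOAD"
```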
- RUST_LOG="${RUST_LOG}" ./smp --team-id ${SMP_AGENT_TEAM_ID} --api-base ${SMP_API} --aws-named-profile ${AWS_NAMED_PROFILE} job result diff --git a/.gitlab/install_script_testing/install_script_testing.yml b/.gitlab/install_script_testing/install_script_testing.yml index 45f93afe64309..403625fb08085 100644 --- a/.gitlab/install_script_testing/install_script_testing.yml +++ b/.gitlab/install_script_testing/install_script_testing.yml @@ -4,9 +4,8 @@ test_install_script: image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] script: - - source /root/.bashrc - set +x - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - export TESTING_APT_URL=$DEB_TESTING_S3_BUCKET - export TESTING_YUM_URL=$RPM_TESTING_S3_BUCKET - export TEST_PIPELINE_ID=$CI_PIPELINE_ID diff --git a/.gitlab/integration_test/dogstatsd.yml b/.gitlab/integration_test/dogstatsd.yml index 5e5484df1024f..ab1862d716dae 100644 --- a/.gitlab/integration_test/dogstatsd.yml +++ b/.gitlab/integration_test/dogstatsd.yml @@ -4,14 +4,10 @@ dogstatsd_x64_size_test: stage: integration_test - rules: - - !reference [.except_mergequeue] - - when: on_success image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] needs: ["build_dogstatsd_static-binary_x64"] before_script: - - source /root/.bashrc - mkdir -p $STATIC_BINARIES_DIR - $S3_CP_CMD $S3_ARTIFACTS_URI/static/dogstatsd.amd64 $STATIC_BINARIES_DIR/dogstatsd script: diff --git a/.gitlab/integration_test/otel.yml b/.gitlab/integration_test/otel.yml index 511a0cf47cffa..d9d3c71e73510 100644 --- a/.gitlab/integration_test/otel.yml +++ b/.gitlab/integration_test/otel.yml @@ -6,8 +6,9 @@ integration_tests_otel: stage: integration_test image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] - needs: [] + needs: ["go_deps"] script: + - !reference [.retrieve_linux_go_deps] - inv check-otel-build - inv check-otel-module-versions rules: diff --git a/.gitlab/integration_test/windows.yml b/.gitlab/integration_test/windows.yml index a68f86ab8a16a..2ddf04fddb997 100644 --- a/.gitlab/integration_test/windows.yml +++ b/.gitlab/integration_test/windows.yml @@ -7,7 +7,11 @@ needs: ["go_deps", "go_tools_deps"] tags: ["runner:windows-docker", "windowsversion:1809"] before_script: - - $vcpkgBlobSaSUrl=$(& "$CI_PROJECT_DIR\tools\ci\aws_ssm_get_wrapper.ps1" $Env:VCPKG_BLOB_SAS_URL_SSM_NAME) + - $tmpfile = [System.IO.Path]::GetTempFileName() + - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" -parameterName "$Env:VCPKG_BLOB_SAS_URL" -tempFile "$tmpfile") + - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } + - $vcpkgBlobSaSUrl=$(cat "$tmpfile") + - Remove-Item "$tmpfile" script: - $ErrorActionPreference = "Stop" - '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"' diff --git a/.gitlab/internal_image_deploy/internal_image_deploy.yml b/.gitlab/internal_image_deploy/internal_image_deploy.yml index f4cb34de1588a..687c4671defe8 100644 --- 
a/.gitlab/internal_image_deploy/internal_image_deploy.yml +++ b/.gitlab/internal_image_deploy/internal_image_deploy.yml @@ -22,8 +22,7 @@ docker_trigger_internal: TMPL_SRC_REPO: ci/datadog-agent/agent RELEASE_STAGING: "true" script: - - source /root/.bashrc - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | if [ "$BUCKET_BRANCH" = "nightly" ]; then @@ -68,8 +67,7 @@ docker_trigger_internal-ot: TMPL_SRC_REPO: ci/datadog-agent/agent RELEASE_STAGING: "true" script: - - source /root/.bashrc - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | if [ "$BUCKET_BRANCH" = "nightly" ]; then @@ -115,8 +113,7 @@ docker_trigger_cluster_agent_internal: RELEASE_STAGING: "true" RELEASE_PROD: "true" script: - - source /root/.bashrc - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | if [ "$BUCKET_BRANCH" = "nightly" ]; then @@ -162,8 +159,7 @@ docker_trigger_cws_instrumentation_internal: RELEASE_STAGING: "true" RELEASE_PROD: "true" script: - - source /root/.bashrc - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | if [ "$BUCKET_BRANCH" = "nightly" ]; then diff --git a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml index 0ecef941fe652..605ac0def4114 100644 --- a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml +++ b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml @@ -36,8 +36,7 @@ internal_kubernetes_deploy_experimental: EXPLICIT_WORKFLOWS: "//workflows:beta_builds.agents_nightly.staging-deploy.publish,//workflows:beta_builds.agents_nightly.staging-validate.publish,//workflows:beta_builds.agents_nightly.prod-wait-business-hours.publish,//workflows:beta_builds.agents_nightly.prod-deploy.publish,//workflows:beta_builds.agents_nightly.prod-validate.publish,//workflows:beta_builds.agents_nightly.publish-image-confirmation.publish" BUNDLE_VERSION_OVERRIDE: "v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}" script: - - source /root/.bashrc - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - "inv pipeline.trigger-child-pipeline --project-name DataDog/k8s-datadog-agent-ops --git-ref main --variable OPTION_AUTOMATIC_ROLLOUT --variable EXPLICIT_WORKFLOWS @@ -69,4 
+68,4 @@ notify-slack: script: - export SDM_JWT=$(vault read -field=token identity/oidc/token/sdm) - python3 -m pip install -r tasks/requirements.txt - - inv pipeline.changelog ${CI_COMMIT_SHORT_SHA} + - inv pipeline.changelog ${CI_COMMIT_SHORT_SHA} || exit $? diff --git a/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml b/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml index d3069ea0b320f..067ca517fdba9 100644 --- a/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml +++ b/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml @@ -22,8 +22,7 @@ rc_kubernetes_deploy: EXPLICIT_WORKFLOWS: "//workflows:deploy_rc.agents_rc" AGENT_IMAGE_TAG: $CI_COMMIT_REF_NAME script: - - source /root/.bashrc - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - "inv pipeline.trigger-child-pipeline --project-name DataDog/k8s-datadog-agent-ops --git-ref main --variable OPTION_AUTOMATIC_ROLLOUT --variable EXPLICIT_WORKFLOWS diff --git a/.gitlab/kernel_matrix_testing/common.yml b/.gitlab/kernel_matrix_testing/common.yml index a75a9b9b8b04e..b2acf5a35d422 100644 --- a/.gitlab/kernel_matrix_testing/common.yml +++ b/.gitlab/kernel_matrix_testing/common.yml @@ -29,7 +29,7 @@ .write_ssh_key_file: - touch $AWS_EC2_SSH_KEY_FILE && chmod 600 $AWS_EC2_SSH_KEY_FILE - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SSH_KEY_SSM_NAME > $AWS_EC2_SSH_KEY_FILE + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_KEY > $AWS_EC2_SSH_KEY_FILE || exit $? # Without the newline ssh silently fails and moves on to try other auth methods - echo "" >> $AWS_EC2_SSH_KEY_FILE - chmod 600 $AWS_EC2_SSH_KEY_FILE @@ -47,7 +47,7 @@ .kmt_new_profile: - mkdir -p ~/.aws - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_QA_PROFILE_SSM_NAME >> ~/.aws/config + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config || exit $? 
- export AWS_PROFILE=agent-qa-ci .define_if_collect_complexity: @@ -60,7 +60,7 @@ - echo "COLLECT_COMPLEXITY=${COLLECT_COMPLEXITY}" .collect_outcomes_kmt: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - export MICRO_VM_IP=$(jq --exit-status --arg TAG $TAG --arg ARCH $ARCH --arg TEST_SET $TEST_SET -r '.[$ARCH].microvms | map(select(."vmset-tags"| index($TEST_SET))) | map(select(.tag==$TAG)) | .[].ip' $CI_PROJECT_DIR/stack.output) # Collect setup-ddvm systemd service logs - mkdir -p $CI_PROJECT_DIR/logs @@ -114,7 +114,7 @@ scp $DD_AGENT_TESTING_DIR/kmt-dockers-$ARCH.tar.gz metal_instance:/opt/kernel-version-testing fi after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - !reference [.tag_kmt_ci_job] variables: AWS_EC2_SSH_KEY_FILE: $CI_PROJECT_DIR/ssh_key @@ -142,11 +142,13 @@ KUBERNETES_MEMORY_REQUEST: "12Gi" KUBERNETES_MEMORY_LIMIT: "16Gi" VMCONFIG_FILE: "${CI_PROJECT_DIR}/vmconfig-${CI_PIPELINE_ID}-${ARCH}.json" + EXTERNAL_LINKS_PATH: external_links_$CI_JOB_ID.json before_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - !reference [.retrieve_linux_go_deps] - !reference [.kmt_new_profile] - !reference [.write_ssh_key_file] + - inv -e gitlab.generate-ci-visibility-links --output=$EXTERNAL_LINKS_PATH || true script: - echo "s3://dd-pulumi-state?region=us-east-1&awssdk=v2&profile=$AWS_PROFILE" > $STACK_DIR - pulumi login $(cat $STACK_DIR | tr -d '\n') @@ -157,7 +159,7 @@ - jq "." $CI_PROJECT_DIR/stack.output - pulumi logout after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - export AWS_PROFILE=agent-qa-ci - !reference [.shared_filters_and_queries] - mkdir -p $CI_PROJECT_DIR/libvirt/log/$ARCH $CI_PROJECT_DIR/libvirt/xml $CI_PROJECT_DIR/libvirt/qemu $CI_PROJECT_DIR/libvirt/dnsmasq @@ -169,6 +171,19 @@ - scp -o StrictHostKeyChecking=no -i $AWS_EC2_SSH_KEY_FILE "ubuntu@$INSTANCE_IP:/tmp/ddvm-xml-*" $CI_PROJECT_DIR/libvirt/xml - scp -o StrictHostKeyChecking=no -i $AWS_EC2_SSH_KEY_FILE "ubuntu@$INSTANCE_IP:/tmp/qemu-ddvm-*.log" $CI_PROJECT_DIR/libvirt/qemu - scp -o StrictHostKeyChecking=no -i $AWS_EC2_SSH_KEY_FILE "ubuntu@$INSTANCE_IP:/tmp/dnsmasq/*" $CI_PROJECT_DIR/libvirt/dnsmasq + # build vm-metrics collector + - | + GO_ARCH=$ARCH + if [ "${ARCH}" == "x86_64" ]; then + GO_ARCH=amd64 + fi + - cd test/new-e2e && GOOS=linux GOARCH="${GO_ARCH}" go build system-probe/vm-metrics/vm-metrics.go + # The vm-metrics collector is uploaded and executed in the same job because we need to execute it after the datadog-agent + # is launched in the metal instance, and before the tests are executed. This place naturally satisfies these constraints. 
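The build step above maps the runner's `uname`-style architecture name to a Go architecture before cross-compiling the collector for the Linux metal instance. A small sketch of that mapping on its own; the `vm-metrics` source path comes from the diff, while the `uname -m` fallback and the `aarch64` branch are assumptions (the CI script only remaps `x86_64`):

```bash
ARCH="${ARCH:-$(uname -m)}"   # CI sets ARCH; fall back to the local machine otherwise

# uname reports x86_64 / aarch64, the Go toolchain expects amd64 / arm64.
case "$ARCH" in
  x86_64)         GO_ARCH=amd64 ;;
  aarch64|arm64)  GO_ARCH=arm64 ;;
  *)              GO_ARCH="$ARCH" ;;
esac

# Cross-compile the collector for the Linux metal instance.
(cd test/new-e2e && GOOS=linux GOARCH="$GO_ARCH" go build system-probe/vm-metrics/vm-metrics.go)
```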
+ # upload vm-metrics collector to metal instance + - scp -o StrictHostKeyChecking=no -i $AWS_EC2_SSH_KEY_FILE $CI_PROJECT_DIR/test/new-e2e/vm-metrics "ubuntu@$INSTANCE_IP:/home/ubuntu/vm-metrics" + # run vm-metrics collector + - ssh -o StrictHostKeyChecking=no -i $AWS_EC2_SSH_KEY_FILE "ubuntu@$INSTANCE_IP" "/home/ubuntu/vm-metrics -statsd-host=127.0.0.1 -statsd-port=8125 -libvirt-uri=/var/run/libvirt/libvirt-sock-ro --tag \"arch:${ARCH}\" --tag \"test-component:${TEST_COMPONENT}\" --tag \"ci-pipeline-id:${CI_PIPELINE_ID}\" --daemon -log-file /home/ubuntu/daemon.log" - !reference [.tag_kmt_ci_job] artifacts: when: always @@ -176,13 +191,16 @@ - $CI_PROJECT_DIR/stack.output - $CI_PROJECT_DIR/libvirt - $VMCONFIG_FILE + reports: + annotations: + - $EXTERNAL_LINKS_PATH .kmt_cleanup: stage: kernel_matrix_testing_cleanup image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/test-infra-definitions/runner$TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX:$TEST_INFRA_DEFINITIONS_BUILDIMAGES tags: ["arch:amd64"] before_script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - !reference [.kmt_new_profile] script: - !reference [.shared_filters_and_queries] @@ -199,7 +217,7 @@ aws ec2 terminate-instances --instance-ids "${INSTANCE_ID}" fi after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - !reference [.tag_kmt_ci_job] # Manual cleanup jobs, these will be used to cleanup the instances after the tests @@ -214,6 +232,7 @@ .kmt_run_tests: retry: max: 2 + exit_codes: 42 when: - job_execution_timeout - runner_system_failure @@ -228,7 +247,7 @@ RETRY: 2 EXTERNAL_LINKS_PATH: external_links_$CI_JOB_ID.json before_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - !reference [.kmt_new_profile] - !reference [.write_ssh_key_file] - echo "CI_JOB_URL=${CI_JOB_URL}" >> $DD_AGENT_TESTING_DIR/job_env.txt @@ -256,6 +275,7 @@ - NESTED_VM_CMD="/home/ubuntu/connector -host ${MICRO_VM_IP} -user root -ssh-file /home/kernel-version-testing/ddvm_rsa -vm-cmd 'CI=true /root/fetch_dependencies.sh ${ARCH} && COLLECT_COMPLEXITY=${COLLECT_COMPLEXITY} /opt/micro-vm-init.sh -test-tools /opt/testing-tools -retry ${RETRY} -test-root /opt/${TEST_COMPONENT}-tests -packages-run-config /opt/${TEST_SET}.json'" - $CI_PROJECT_DIR/connector-$ARCH -host $INSTANCE_IP -user ubuntu -ssh-file $AWS_EC2_SSH_KEY_FILE -vm-cmd "${NESTED_VM_CMD}" -send-env-vars DD_API_KEY # Allow DD_API_KEY to be passed to the metal instance, so we can use it to send metrics from the connector. - ssh metal_instance "ssh ${MICRO_VM_IP} '/opt/testing-tools/test-json-review -flakes /opt/testing-tools/flakes.yaml -codeowners /opt/testing-tools/CODEOWNERS -test-root /opt/${TEST_COMPONENT}-tests'" + - '[ ! 
-f $CI_PROJECT_DIR/daemon-${ARCH}.log ] && scp metal_instance:/home/ubuntu/daemon.log $CI_PROJECT_DIR/vm-metrics-daemon-${ARCH}.log' artifacts: expire_in: 2 weeks when: always @@ -265,6 +285,7 @@ - $DD_AGENT_TESTING_DIR/verifier-complexity-$ARCH-$TAG-${TEST_COMPONENT}.tar.gz - $CI_PROJECT_DIR/logs - $CI_PROJECT_DIR/pcaps + - $CI_PROJECT_DIR/vm-metrics-daemon-${ARCH}.log reports: annotations: - $EXTERNAL_LINKS_PATH @@ -311,13 +332,9 @@ notify_ebpf_complexity_changes: TEST_SET: no_usm allow_failure: true before_script: - - source /root/.bashrc - python3 -m pip install tabulate # Required for printing the tables - python3 -m pip install -r tasks/libs/requirements-github.txt - - | - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_APP_KEY_SSM_NAME | base64) - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID_SSM_NAME) - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID_SSM_NAME) - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN_SSM_NAME) + - !reference [.setup_agent_github_app] + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) || exit $?; export GITLAB_TOKEN script: - inv -e ebpf.generate-complexity-summary-for-pr diff --git a/.gitlab/kernel_matrix_testing/security_agent.yml b/.gitlab/kernel_matrix_testing/security_agent.yml index 8df130eb83782..c75f78c5449d8 100644 --- a/.gitlab/kernel_matrix_testing/security_agent.yml +++ b/.gitlab/kernel_matrix_testing/security_agent.yml @@ -72,7 +72,7 @@ kmt_setup_env_secagent_x64: # upload connector to metal instance - scp $CI_PROJECT_DIR/connector-${ARCH} metal_instance:/home/ubuntu/connector after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - !reference [.tag_kmt_ci_job] variables: AWS_EC2_SSH_KEY_FILE: $CI_PROJECT_DIR/ssh_key diff --git a/.gitlab/kernel_matrix_testing/system_probe.yml b/.gitlab/kernel_matrix_testing/system_probe.yml index f871f2aca10a1..da9c47e167702 100644 --- a/.gitlab/kernel_matrix_testing/system_probe.yml +++ b/.gitlab/kernel_matrix_testing/system_probe.yml @@ -28,13 +28,14 @@ upload_dependencies_sysprobe_arm64: stage: kernel_matrix_testing_prepare script: # DockerHub login for build to limit rate limit when pulling base images - - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN_SSM_KEY) - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD_SSM_KEY | crane auth login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - DOCKER_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_RO user) || exit $? 
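Throughout this change, secrets move from aws_ssm_get_wrapper.sh to fetch_secret.sh (whose optional second argument selects a field of the secret, such as user or token), and each call is split into an assignment followed by a separate export. The split is deliberate: in the old one-liner form export VAR=$(...), the exit status of the command substitution is hidden behind export's own successful status, so a failed fetch would go unnoticed. A minimal sketch of the new pattern, with placeholder names:

    # Fetch one field of a secret; abort the job immediately if the fetch fails.
    MY_TOKEN=$("$CI_PROJECT_DIR"/tools/ci/fetch_secret.sh "$SOME_SECRET" token) || exit $?
    export MY_TOKEN
    # Counter-example (old form): export returns 0 even when the substitution fails,
    # so the job would keep running with an empty MY_TOKEN.
    # export MY_TOKEN=$("$CI_PROJECT_DIR"/tools/ci/fetch_secret.sh "$SOME_SECRET")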
+ - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_RO token | crane auth login --username "$DOCKER_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - EXIT="${PIPESTATUS[0]}"; if [ $EXIT -ne 0 ]; then echo "Unable to locate credentials needs gitlab runner restart"; exit $EXIT; fi # Pull base images - mkdir $KMT_DOCKERS - inv -e system-probe.save-test-dockers --use-crane --output-dir $KMT_DOCKERS --arch $ARCH after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - !reference [.tag_kmt_ci_job] artifacts: expire_in: 1 day @@ -81,7 +82,7 @@ pull_test_dockers_arm64: - !reference [.setup_ssh_config] - scp $CI_PROJECT_DIR/kmt-deps/ci/$ARCH/$ARCHIVE_NAME metal_instance:/opt/kernel-version-testing/ after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - !reference [.tag_kmt_ci_job] variables: DEPENDENCIES: $CI_PROJECT_DIR/kmt-deps/ci/$ARCH/btfs @@ -160,7 +161,7 @@ kmt_setup_env_sysprobe_x64: # upload connector to metal instance - scp $CI_PROJECT_DIR/connector-${ARCH} metal_instance:/home/ubuntu/connector after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - !reference [.tag_kmt_ci_job] variables: AWS_EC2_SSH_KEY_FILE: $CI_PROJECT_DIR/ssh_key diff --git a/.gitlab/kitchen_deploy/kitchen_deploy.yml b/.gitlab/kitchen_deploy/kitchen_deploy.yml index 4184c08c53f8b..cc8657b6bbd39 100644 --- a/.gitlab/kitchen_deploy/kitchen_deploy.yml +++ b/.gitlab/kitchen_deploy/kitchen_deploy.yml @@ -3,15 +3,14 @@ # Contains jobs which deploy Agent package to testing repsoitories that are used in kitchen tests. .setup_rpm_signing_key: &setup_rpm_signing_key - - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY_SSM_NAME) - - printf -- "$RPM_GPG_KEY" | gpg --import --batch - - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE_SSM_NAME) + - printf -- "$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_GPG_KEY)" | gpg --import --batch + - EXIT="${PIPESTATUS[0]}"; if [ $EXIT -ne 0 ]; then echo "Unable to locate credentials needs gitlab runner restart"; exit $EXIT; fi + - RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_SIGNING_PASSPHRASE) || exit $? .setup_apt_signing_key: &setup_apt_signing_key - - APT_SIGNING_PRIVATE_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_GPG_KEY_SSM_NAME) - - APT_SIGNING_KEY_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DEB_SIGNING_PASSPHRASE_SSM_NAME) - - - printf -- "$APT_SIGNING_PRIVATE_KEY" | gpg --import --batch + - printf -- "$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_GPG_KEY)" | gpg --import --batch + - EXIT="${PIPESTATUS[0]}"; if [ $EXIT -ne 0 ]; then echo "Unable to locate credentials needs gitlab runner restart"; exit $EXIT; fi + - APT_SIGNING_KEY_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_SIGNING_PASSPHRASE) || exit $? 
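Both signing-key anchors read the outcome of the piped import through ${PIPESTATUS[0]} because, after a pipeline, $? only carries the exit status of the last stage (gpg); PIPESTATUS keeps one status per stage of the most recent pipeline. A small illustration, with placeholder key material:

    SECRET_MATERIAL="placeholder, not a real key"
    printf -- "$SECRET_MATERIAL" | gpg --import --batch
    # $? now holds gpg's status; PIPESTATUS still exposes every stage of that pipeline.
    echo "first stage exited ${PIPESTATUS[0]}, gpg exited ${PIPESTATUS[1]}"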
.setup_signing_keys_package: &setup_signing_keys_package # Set up prod apt repo to get the datadog-signing-keys package @@ -40,7 +39,6 @@ variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-a6 before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR deploy_deb_testing-a6_x64: @@ -85,7 +83,6 @@ deploy_deb_testing-a6_arm64: variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-a7 before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR deploy_deb_testing-a7_x64: @@ -139,7 +136,6 @@ deploy_deb_testing-a7_arm64: variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-a6 before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR deploy_rpm_testing-a6_x64: @@ -177,7 +173,6 @@ deploy_rpm_testing-a6_arm64: variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-a7 before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR deploy_rpm_testing-a7_x64: @@ -226,7 +221,6 @@ deploy_suse_rpm_testing_x64-a6: variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-a6 before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR_SUSE script: - *setup_rpm_signing_key @@ -252,7 +246,6 @@ deploy_suse_rpm_testing_x64-a7: variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-a7 before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR_SUSE script: - *setup_rpm_signing_key @@ -271,7 +264,6 @@ deploy_suse_rpm_testing_arm64-a7: variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-a7 before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR_SUSE script: - *setup_rpm_signing_key @@ -288,7 +280,6 @@ deploy_windows_testing-a6: tags: ["arch:amd64"] needs: ["lint_windows-x64", "windows_msi_x64-a6"] before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR script: - $S3_CP_CMD --recursive --exclude "*" --include "datadog-agent-6.*.msi" $OMNIBUS_PACKAGE_DIR s3://$WIN_S3_BUCKET/$WINDOWS_TESTING_S3_BUCKET_A6 --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732 @@ -304,7 +295,6 @@ deploy_windows_testing-a7: needs: ["lint_windows-x64", "windows_msi_and_bosh_zip_x64-a7", "windows-installer-amd64"] before_script: - - source /root/.bashrc - ls $OMNIBUS_PACKAGE_DIR script: - $S3_CP_CMD diff --git a/.gitlab/lint/include.yml b/.gitlab/lint/include.yml new file mode 100644 index 0000000000000..27676d98eab76 --- /dev/null +++ b/.gitlab/lint/include.yml @@ -0,0 +1,6 @@ +# lint stage +# Include jobs that run linters on the Agent code. + +include: + - .gitlab/lint/technical_linters.yml + diff --git a/.gitlab/lint/technical_linters.yml b/.gitlab/lint/technical_linters.yml new file mode 100644 index 0000000000000..099cd975bb8a9 --- /dev/null +++ b/.gitlab/lint/technical_linters.yml @@ -0,0 +1,64 @@ + +.lint: + stage: lint + image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + tags: ["arch:amd64"] + +lint_licenses: + extends: .lint + script: + - !reference [.retrieve_linux_go_deps] + - !reference [.retrieve_linux_go_tools_deps] + - inv -e install-tools + - inv -e lint-licenses + needs: ["go_tools_deps", "go_deps"] + +lint_shell: + extends: .lint + script: + - inv -e install-shellcheck + - shellcheck --version + #Excludes: + #SC2028: echo may not expand escape sequences. Use printf. + #SC2059: Don't use variables in the printf format string. Use printf "..%s.." "$foo".
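For reference, minimal snippets that would trigger the two excluded checks (illustrative only, not code from this repository):

    name="world"
    echo "hello\n$name"           # SC2028: echo may not expand the escape sequence
    printf "hello $name\n"        # SC2059: variable used inside the printf format string
    printf 'hello %s\n' "$name"   # the form ShellCheck recommends instead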
+ - shellcheck --severity=info -e SC2059 -e SC2028 --shell=bash ./cmd/**/*.sh ./omnibus/package-scripts/*/* + +lint_filename: + extends: .lint + script: + - inv -e linter.filenames + +lint_copyrights: + extends: .lint + script: + - inv -e linter.copyrights + +lint_codeowners: + extends: .lint + script: + - inv -e github.lint-codeowner + +lint_components: + extends: .lint + script: + - inv -e lint-components lint-fxutil-oneshot-test + + +lint_python: + extends: .lint + needs: [] + script: + - inv -e linter.python + +lint_update_go: + extends: .lint + needs: [] + script: + - inv -e linter.update-go + +validate_modules: + extends: .lint + needs: [] + script: + - inv -e modules.validate + - inv -e modules.validate-used-by-otel diff --git a/.gitlab/maintenance_jobs/docker.yml b/.gitlab/maintenance_jobs/docker.yml index 43f8bded7ae2f..ca336071df7ef 100644 --- a/.gitlab/maintenance_jobs/docker.yml +++ b/.gitlab/maintenance_jobs/docker.yml @@ -43,29 +43,3 @@ revert_latest_7: IMG_DESTINATIONS: dogstatsd:7,dogstatsd:latest - IMG_SOURCES: datadog/cluster-agent:${NEW_LATEST_RELEASE_7} IMG_DESTINATIONS: cluster-agent:latest - -# -# Use this step to delete a tag of a given image -# We call the Docker Hub API because docker cli doesn't support deleting tags -# - Run a pipeline on main with the IMAGE and TAG env vars -# - in the gitlab pipeline view, trigger the step (in the first column) -delete_docker_tag: - rules: !reference [.on_main_manual] - stage: maintenance_jobs - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/docker-notary:v2718650-9ce6565-0.6.1-py3 - tags: ["arch:amd64"] - dependencies: [] - variables: - IMAGE: "" # image name, for example "agent" - TAG: "" # tag name, for example "6.9.0" - ORGANIZATION: "datadog" - before_script: - - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_LOGIN_SSM_KEY) - - PASS=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $DOCKER_REGISTRY_PWD_SSM_KEY) - - python3 -m pip install -r requirements.txt - - | - export DOCKER_TOKEN=`curl -s -H "Content-Type: application/json" -X POST -d '{"username": "'$DOCKER_REGISTRY_LOGIN'", "password": "'$PASS'"}' https://hub.docker.com/v2/users/login/ | python -c 'import sys, json; print(json.load(sys.stdin)["token"].strip())'` - script: - - if [[ -z "$IMAGE" ]]; then echo "Need an image"; exit 1; fi - - if [[ -z "$TAG" ]]; then echo "Need a tag to delete"; exit 1; fi - - inv -e docker.delete ${ORGANIZATION} ${IMAGE} ${TAG} ${DOCKER_TOKEN} &>/dev/null diff --git a/.gitlab/maintenance_jobs/kitchen.yml b/.gitlab/maintenance_jobs/kitchen.yml index b37355076f395..1716ac845ad86 100644 --- a/.gitlab/maintenance_jobs/kitchen.yml +++ b/.gitlab/maintenance_jobs/kitchen.yml @@ -26,10 +26,10 @@ periodic_kitchen_cleanup_azure: # the job to be run one at a time. 
resource_group: azure_cleanup script: - - export ARM_SUBSCRIPTION_ID=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID_SSM_NAME` - - export ARM_CLIENT_ID=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID_SSM_NAME` - - export ARM_CLIENT_SECRET=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET_SSM_NAME` - - export ARM_TENANT_ID=`$CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID_SSM_NAME` + - ARM_SUBSCRIPTION_ID=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_SUBSCRIPTION_ID` || exit $?; export ARM_SUBSCRIPTION_ID + - ARM_CLIENT_ID=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_ID` || exit $?; export ARM_CLIENT_ID + - ARM_CLIENT_SECRET=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_SECRET` || exit $?; export ARM_CLIENT_SECRET + - ARM_TENANT_ID=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_TENANT_ID` || exit $?; export ARM_TENANT_ID # Remove kitchen resources for all existing test suite prefixes - RESOURCE_GROUP_PREFIX=kitchen-chef python3 /deploy_scripts/cleanup_azure.py - RESOURCE_GROUP_PREFIX=kitchen-win python3 /deploy_scripts/cleanup_azure.py diff --git a/.gitlab/notify/notify.yml b/.gitlab/notify/notify.yml index 7c6e8aa580159..ea966d752ef63 100644 --- a/.gitlab/notify/notify.yml +++ b/.gitlab/notify/notify.yml @@ -25,8 +25,8 @@ notify: resource_group: notification timeout: 15 minutes # Added to prevent a stuck job blocking the resource_group defined above script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_READ_API_TOKEN_SSM_NAME) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_READ_API_TOKEN) || exit $?; export GITLAB_TOKEN + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - python3 -m pip install -r requirements.txt -r tasks/libs/requirements-notifications.txt - | # Do not send notifications if this is a child pipeline of another repo @@ -53,9 +53,8 @@ send_pipeline_stats: when: always dependencies: [] script: - - source /root/.bashrc - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_READ_API_TOKEN_SSM_NAME) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_READ_API_TOKEN) || exit $?; export GITLAB_TOKEN + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - invoke -e notify.send-stats notify_github: @@ -79,7 +78,6 @@ notify_github: dependencies: [] allow_failure: true script: - - source /root/.bashrc - !reference [.install_pr_commenter] - messagefile="$(mktemp)" - echo "Use this command from [test-infra-definitions](https://github.com/DataDog/test-infra-definitions) to manually test this PR changes on a VM:" >> "$messagefile" @@ -93,7 +91,7 @@ notify_github: notify_gitlab_ci_changes: image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES stage: notify - needs: [] + needs: [compute_gitlab_ci_config] tags: ["arch:amd64"] rules: - if: $CI_PIPELINE_SOURCE != "push" @@ -103,20 +101,11 @@ notify_gitlab_ci_changes: - .gitlab-ci.yml - .gitlab/**/*.yml compare_to: main # TODO: use a variable, when this is supported 
https://gitlab.com/gitlab-org/gitlab/-/issues/369916 - before_script: - # Get main history - - git fetch origin main - - git checkout main - - git checkout - script: - - source /root/.bashrc - python3 -m pip install -r tasks/libs/requirements-github.txt - - | - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_APP_KEY_SSM_NAME | base64) - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID_SSM_NAME) - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID_SSM_NAME) - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_FULL_API_TOKEN_SSM_NAME) - - inv -e notify.gitlab-ci-diff --pr-comment + - !reference [.setup_agent_github_app] + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) || exit $?; export GITLAB_TOKEN + - inv -e notify.gitlab-ci-diff --from-diff artifacts/diff.gitlab-ci.yml --pr-comment .failure_summary_job: image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES @@ -127,10 +116,9 @@ notify_gitlab_ci_changes: timeout: 15 minutes # Added to prevent a stuck job blocking the resource_group defined above .failure_summary_setup: - - source /root/.bashrc - - export SLACK_API_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $SLACK_AGENT_CI_TOKEN_SSM_NAME) - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_READ_API_TOKEN_SSM_NAME) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - SLACK_API_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SLACK_AGENT_CI_TOKEN) || exit $?; export SLACK_API_TOKEN + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_READ_API_TOKEN) || exit $?; export GITLAB_TOKEN + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - python3 -m pip install -r requirements.txt -r tasks/libs/requirements-notifications.txt # Upload failure summary data to S3 at the end of each main pipeline @@ -167,3 +155,28 @@ notify_failure_summary_daily: echo 'Sending weekly summary' inv -e notify.failure-summary-send-notifications --weekly-summary fi + +close_failing_tests_stale_issues: + stage: notify + image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + rules: + # Daily + - if: $CI_COMMIT_BRANCH != "main" || $CI_PIPELINE_SOURCE != "schedule" + when: never + - !reference [.on_deploy_nightly_repo_branch_always] + needs: [] + tags: ["arch:arm64"] + script: + - weekday="$(date --utc '+%A')" + # Weekly on Friday + - | + if [ "$weekday" != "Friday" ]; then + echo "This script is run weekly on Fridays" + exit + fi + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY + - DD_APP_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $APP_KEY_ORG2) || exit $?; export DD_APP_KEY + - ATLASSIAN_PASSWORD=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $ATLASSIAN_WRITE token) || exit $?; export ATLASSIAN_PASSWORD + - ATLASSIAN_USERNAME=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $ATLASSIAN_WRITE user) || exit $?; export ATLASSIAN_USERNAME + - python3 -m pip install -r requirements.txt -r tasks/requirements_release_tasks.txt # For Atlassian / Jira dependencies + - inv -e notify.close-failing-tests-stale-issues diff 
--git a/.gitlab/package_build/dmg.yml b/.gitlab/package_build/dmg.yml index cf07e07415e7d..02d0b830cc910 100644 --- a/.gitlab/package_build/dmg.yml +++ b/.gitlab/package_build/dmg.yml @@ -34,5 +34,4 @@ agent_dmg-x64-a7: PYTHON_RUNTIMES: "3" timeout: 6h before_script: - - source /root/.bashrc - export RELEASE_VERSION=$RELEASE_VERSION_7 diff --git a/.gitlab/package_build/heroku.yml b/.gitlab/package_build/heroku.yml index 337bc54932f56..f7675d9114d94 100644 --- a/.gitlab/package_build/heroku.yml +++ b/.gitlab/package_build/heroku.yml @@ -4,7 +4,7 @@ rules: - !reference [.except_mergequeue] - when: on_success - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + image: registry.ddbuild.io/ci/datadog-agent-buildimages/linux-glibc-2-17-x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] needs: [ @@ -14,7 +14,6 @@ "generate_minimized_btfs_x64", ] script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] - !reference [.cache_omnibus_ruby_deps, setup] - echo "About to build for $RELEASE_VERSION" @@ -41,6 +40,9 @@ KUBERNETES_CPU_REQUEST: 16 DD_PKG_ARCH: "x86_64" PACKAGE_ARCH: "amd64" + DD_CC: 'x86_64-unknown-linux-gnu-gcc' + DD_CXX: 'x86_64-unknown-linux-gnu-g++' + DD_CMAKE_TOOLCHAIN: '/opt/cmake/x86_64-unknown-linux-gnu.toolchain.cmake' artifacts: expire_in: 2 weeks paths: diff --git a/.gitlab/package_build/installer.yml b/.gitlab/package_build/installer.yml index ac335677c8bad..41c4fa719b599 100644 --- a/.gitlab/package_build/installer.yml +++ b/.gitlab/package_build/installer.yml @@ -5,7 +5,8 @@ .common_build_oci: script: - echo "About to build for $RELEASE_VERSION" - - export INSTALL_DIR=/opt/datadog-packages/datadog-agent/$(inv agent.version -u)-1 + - AGENT_VERSION="$(inv agent.version -u)-1" || exit $? 
+ - export INSTALL_DIR=/opt/datadog-packages/datadog-agent/"$AGENT_VERSION" - !reference [.retrieve_linux_go_deps] - !reference [.cache_omnibus_ruby_deps, setup] # remove artifacts from previous pipelines that may come from the cache @@ -40,7 +41,7 @@ datadog-agent-oci-x64-a7: - !reference [.except_mergequeue] - when: on_success stage: package_build - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + image: registry.ddbuild.io/ci/datadog-agent-buildimages/linux-glibc-2-17-x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] needs: [ @@ -54,8 +55,10 @@ datadog-agent-oci-x64-a7: PYTHON_RUNTIMES: "3" PACKAGE_ARCH: amd64 DESTINATION_OCI: "datadog-agent-7-remote-updater-amd64.tar.xz" + DD_CC: 'x86_64-unknown-linux-gnu-gcc' + DD_CXX: 'x86_64-unknown-linux-gnu-g++' + DD_CMAKE_TOOLCHAIN: '/opt/cmake/x86_64-unknown-linux-gnu.toolchain.cmake' before_script: - - source /root/.bashrc - export RELEASE_VERSION=$RELEASE_VERSION_7 datadog-agent-oci-arm64-a7: @@ -64,7 +67,7 @@ datadog-agent-oci-arm64-a7: - !reference [.except_mergequeue] - when: on_success stage: package_build - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + image: registry.ddbuild.io/ci/datadog-agent-buildimages/linux-glibc-2-23-arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:arm64"] needs: [ @@ -78,8 +81,10 @@ datadog-agent-oci-arm64-a7: PYTHON_RUNTIMES: "3" PACKAGE_ARCH: arm64 DESTINATION_OCI: "datadog-agent-7-remote-updater-arm64.tar.xz" + DD_CC: 'aarch64-unknown-linux-gnu-gcc' + DD_CXX: 'aarch64-unknown-linux-gnu-g++' + DD_CMAKE_TOOLCHAIN: '/opt/cmake/aarch64-unknown-linux-gnu.toolchain.cmake' before_script: - - source /root/.bashrc - export RELEASE_VERSION=$RELEASE_VERSION_7 # @@ -87,7 +92,6 @@ datadog-agent-oci-arm64-a7: # .installer_build_common: script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] - !reference [.cache_omnibus_ruby_deps, setup] - echo "About to build for $RELEASE_VERSION" @@ -119,13 +123,15 @@ installer-amd64: - !reference [.except_mergequeue] - when: on_success stage: package_build - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + image: registry.ddbuild.io/ci/datadog-agent-buildimages/linux-glibc-2-17-x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] needs: ["go_mod_tidy_check", "go_deps"] variables: PACKAGE_ARCH: amd64 DESTINATION_FILE: "datadog-installer_7-amd64.tar.xz" RELEASE_VERSION: "$RELEASE_VERSION_7" + DD_CC: 'x86_64-unknown-linux-gnu-gcc' + DD_CXX: 'x86_64-unknown-linux-gnu-g++' installer-arm64: extends: .installer_build_common @@ -133,21 +139,23 @@ installer-arm64: - !reference [.except_mergequeue] - when: on_success stage: package_build - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES + image: registry.ddbuild.io/ci/datadog-agent-buildimages/linux-glibc-2-23-arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:arm64"] needs: ["go_mod_tidy_check", "go_deps"] variables: PACKAGE_ARCH: arm64 DESTINATION_FILE: "datadog-installer_7-arm64.tar.xz" RELEASE_VERSION: "$RELEASE_VERSION_7" + DD_CC: 'aarch64-unknown-linux-gnu-gcc' + DD_CXX: 
'aarch64-unknown-linux-gnu-g++' installer-amd64-oci: extends: installer-amd64 variables: DESTINATION_FILE: "datadog-updater_7-amd64-oci.tar.xz" before_script: - - source /root/.bashrc - - export INSTALL_DIR=/opt/datadog-packages/datadog-installer/$(inv agent.version -u)-1 + - AGENT_VERSION="$(inv agent.version -u)-1" || exit $? + - export INSTALL_DIR=/opt/datadog-packages/datadog-installer/"$AGENT_VERSION" - export INSTALL_DIR_PARAM="--install-directory=$INSTALL_DIR" installer-arm64-oci: @@ -155,8 +163,8 @@ installer-arm64-oci: variables: DESTINATION_FILE: "datadog-updater_7-arm64-oci.tar.xz" before_script: - - source /root/.bashrc - - export INSTALL_DIR=/opt/datadog-packages/datadog-installer/$(inv agent.version -u)-1 + - AGENT_VERSION="$(inv agent.version -u)-1" || exit $? + - export INSTALL_DIR=/opt/datadog-packages/datadog-installer/"$AGENT_VERSION" - export INSTALL_DIR_PARAM="--install-directory=$INSTALL_DIR" windows-installer-amd64: @@ -188,7 +196,7 @@ windows-installer-amd64: -e SIGN_WINDOWS_DD_WCS=true -e S3_OMNIBUS_CACHE_BUCKET="$S3_OMNIBUS_CACHE_BUCKET" -e USE_S3_CACHING="$USE_S3_CACHING" - -e API_KEY_ORG2_SSM_NAME=${API_KEY_ORG2_SSM_NAME} + -e API_KEY_ORG2=${API_KEY_ORG2} 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} c:\mnt\tasks\winbuildscripts\buildinstaller.bat after_script: diff --git a/.gitlab/package_build/linux.yml b/.gitlab/package_build/linux.yml index db66682e4d356..5ce39b8a3eb1f 100644 --- a/.gitlab/package_build/linux.yml +++ b/.gitlab/package_build/linux.yml @@ -1,26 +1,28 @@ +.agent_build_script: + - echo "About to build for $RELEASE_VERSION" + - !reference [.retrieve_linux_go_deps] + - !reference [.cache_omnibus_ruby_deps, setup] + # remove artifacts from previous pipelines that may come from the cache + - rm -rf $OMNIBUS_PACKAGE_DIR/* + # Artifacts and cache must live within project directory but we run omnibus in a neutral directory. + # Thus, we move the artifacts at the end in a gitlab-friendly dir. + - tar -xf $CI_PROJECT_DIR/sysprobe-build-outputs.tar.xz + - mkdir -p /tmp/system-probe + - $S3_CP_CMD $S3_PERMANENT_ARTIFACTS_URI/clang-$CLANG_LLVM_VER.${PACKAGE_ARCH} /tmp/system-probe/clang-bpf + - $S3_CP_CMD $S3_PERMANENT_ARTIFACTS_URI/llc-$CLANG_LLVM_VER.${PACKAGE_ARCH} /tmp/system-probe/llc-bpf + - cp $CI_PROJECT_DIR/minimized-btfs.tar.xz /tmp/system-probe/minimized-btfs.tar.xz + - chmod 0744 /tmp/system-probe/clang-bpf /tmp/system-probe/llc-bpf + - inv -e omnibus.build --release-version "$RELEASE_VERSION" --major-version "$AGENT_MAJOR_VERSION" --python-runtimes "$PYTHON_RUNTIMES" --base-dir $OMNIBUS_BASE_DIR ${USE_S3_CACHING} --skip-deps --go-mod-cache="$GOPATH/pkg/mod" --system-probe-bin=/tmp/system-probe --flavor "$FLAVOR" --config-directory "$CONFIG_DIR" --install-directory "$INSTALL_DIR" + - ls -la $OMNIBUS_PACKAGE_DIR + - !reference [.upload_sbom_artifacts] + .agent_build_common: rules: - !reference [.except_mergequeue] - when: on_success stage: package_build script: - - source /root/.bashrc - - echo "About to build for $RELEASE_VERSION" - - !reference [.retrieve_linux_go_deps] - - !reference [.cache_omnibus_ruby_deps, setup] - # remove artifacts from previous pipelines that may come from the cache - - rm -rf $OMNIBUS_PACKAGE_DIR/* - # Artifacts and cache must live within project directory but we run omnibus in a neutral directory. - # Thus, we move the artifacts at the end in a gitlab-friendly dir. 
- - tar -xf $CI_PROJECT_DIR/sysprobe-build-outputs.tar.xz - - mkdir -p /tmp/system-probe - - $S3_CP_CMD $S3_PERMANENT_ARTIFACTS_URI/clang-$CLANG_LLVM_VER.${PACKAGE_ARCH} /tmp/system-probe/clang-bpf - - $S3_CP_CMD $S3_PERMANENT_ARTIFACTS_URI/llc-$CLANG_LLVM_VER.${PACKAGE_ARCH} /tmp/system-probe/llc-bpf - - cp $CI_PROJECT_DIR/minimized-btfs.tar.xz /tmp/system-probe/minimized-btfs.tar.xz - - chmod 0744 /tmp/system-probe/clang-bpf /tmp/system-probe/llc-bpf - - inv -e omnibus.build --release-version "$RELEASE_VERSION" --major-version "$AGENT_MAJOR_VERSION" --python-runtimes "$PYTHON_RUNTIMES" --base-dir $OMNIBUS_BASE_DIR ${USE_S3_CACHING} --skip-deps --go-mod-cache="$GOPATH/pkg/mod" --system-probe-bin=/tmp/system-probe --flavor "$FLAVOR" - - ls -la $OMNIBUS_PACKAGE_DIR - - !reference [.upload_sbom_artifacts] + - !reference [.agent_build_script] variables: KUBERNETES_CPU_REQUEST: 16 KUBERNETES_MEMORY_REQUEST: "32Gi" @@ -33,7 +35,7 @@ - !reference [.cache_omnibus_ruby_deps, cache] .agent_build_x86: - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + image: registry.ddbuild.io/ci/datadog-agent-buildimages/linux-glibc-2-17-x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] needs: [ @@ -44,9 +46,12 @@ ] variables: PACKAGE_ARCH: amd64 + DD_CC: 'x86_64-unknown-linux-gnu-gcc' + DD_CXX: 'x86_64-unknown-linux-gnu-g++' + DD_CMAKE_TOOLCHAIN: '/opt/cmake/x86_64-unknown-linux-gnu.toolchain.cmake' .agent_build_arm64: - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES + image: registry.ddbuild.io/ci/datadog-agent-buildimages/linux-glibc-2-23-arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:arm64"] needs: [ @@ -57,6 +62,9 @@ ] variables: PACKAGE_ARCH: arm64 + DD_CC: 'aarch64-unknown-linux-gnu-gcc' + DD_CXX: 'aarch64-unknown-linux-gnu-g++' + DD_CMAKE_TOOLCHAIN: '/opt/cmake/aarch64-unknown-linux-gnu.toolchain.cmake' .agent_6_build: variables: @@ -82,7 +90,37 @@ before_script: - export RELEASE_VERSION=$RELEASE_VERSION_7 -# build Agent 6 binaries for x86_64 +# Temporary custom agent build test to prevent regression +# This test will be removed when custom path are used to build macos agent +# with in-house macos runner builds. 
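The new linux-glibc-2-17-x64 and linux-glibc-2-23-arm64 images come with dedicated cross toolchains selected through DD_CC, DD_CXX and DD_CMAKE_TOOLCHAIN. How the omnibus build consumes these variables is not shown in this diff; a hedged sketch of the usual pattern for variables of this kind, under that assumption:

    # Assumption: native build steps treat DD_CC/DD_CXX as the C/C++ compilers
    # and hand the toolchain file to CMake-based sub-builds.
    export CC="${DD_CC:-gcc}"
    export CXX="${DD_CXX:-g++}"
    if [ -n "${DD_CMAKE_TOOLCHAIN:-}" ]; then
      cmake -S . -B build -DCMAKE_TOOLCHAIN_FILE="${DD_CMAKE_TOOLCHAIN}"
    else
      cmake -S . -B build
    fi
    cmake --build build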
+datadog-agent-7-x64-custom-path-test: + extends: [.agent_build_x86, .agent_7_build] + rules: + - !reference [.except_mergequeue] + - when: on_success + stage: package_build + script: + - mkdir /custom + - export CONFIG_DIR="/custom" + - export INSTALL_DIR="/custom/datadog-agent" + - !reference [.agent_build_script] + - ls -la $OMNIBUS_PACKAGE_DIR + - ls -la $INSTALL_DIR + - ls -la /custom/etc + - (ls -la /opt/datadog-agent 2>/dev/null && exit 1) || echo "/opt/datadog-agent has correctly not been generated" + - (ls -la /etc/datadog-agent 2>/dev/null && exit 1) || echo "/etc/datadog-agent has correctly not been generated" + variables: + KUBERNETES_CPU_REQUEST: 16 + KUBERNETES_MEMORY_REQUEST: "32Gi" + KUBERNETES_MEMORY_LIMIT: "32Gi" + artifacts: + expire_in: 2 weeks + paths: + - $OMNIBUS_PACKAGE_DIR + cache: + - !reference [.cache_omnibus_ruby_deps, cache] + + # build Agent 6 binaries for x86_64 datadog-agent-6-x64: extends: [.agent_build_common, .agent_build_x86, .agent_6_build] @@ -110,7 +148,6 @@ datadog-ot-agent-7-arm64: extends: .agent_build_common needs: ["go_mod_tidy_check", "go_deps"] script: - - source /root/.bashrc - echo "About to build for $RELEASE_VERSION" - !reference [.retrieve_linux_go_deps] - !reference [.cache_omnibus_ruby_deps, setup] @@ -125,12 +162,18 @@ datadog-ot-agent-7-arm64: iot-agent-x64: extends: .iot-agent-common tags: ["arch:amd64"] - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + image: registry.ddbuild.io/ci/datadog-agent-buildimages/linux-glibc-2-17-x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + variables: + DD_CC: 'x86_64-unknown-linux-gnu-gcc' + DD_CXX: 'x86_64-unknown-linux-gnu-g++' iot-agent-arm64: extends: .iot-agent-common tags: ["arch:arm64"] - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES + image: registry.ddbuild.io/ci/datadog-agent-buildimages/linux-glibc-2-23-arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + variables: + DD_CC: 'aarch64-unknown-linux-gnu-gcc' + DD_CXX: 'aarch64-unknown-linux-gnu-g++' iot-agent-armhf: extends: .iot-agent-common @@ -149,7 +192,6 @@ iot-agent-armhf: - when: on_success stage: package_build script: - - source /root/.bashrc - echo "About to build for $RELEASE_VERSION" - !reference [.retrieve_linux_go_deps] - !reference [.cache_omnibus_ruby_deps, setup] @@ -172,12 +214,18 @@ iot-agent-armhf: dogstatsd-x64: extends: .dogstatsd_build_common needs: ["go_mod_tidy_check", "build_dogstatsd-binary_x64", "go_deps"] - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + image: registry.ddbuild.io/ci/datadog-agent-buildimages/linux-glibc-2-17-x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] + variables: + DD_CC: 'x86_64-unknown-linux-gnu-gcc' + DD_CXX: 'x86_64-unknown-linux-gnu-g++' dogstatsd-arm64: extends: .dogstatsd_build_common - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES + image: registry.ddbuild.io/ci/datadog-agent-buildimages/linux-glibc-2-23-arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:arm64"] needs: ["go_mod_tidy_check", "build_dogstatsd-binary_arm64", "go_deps"] + variables: + DD_CC: 
'aarch64-unknown-linux-gnu-gcc' + DD_CXX: 'aarch64-unknown-linux-gnu-g++' diff --git a/.gitlab/package_build/windows.yml b/.gitlab/package_build/windows.yml index 2d45a7b0ceecd..b54b3c33eb677 100644 --- a/.gitlab/package_build/windows.yml +++ b/.gitlab/package_build/windows.yml @@ -36,7 +36,7 @@ -e GO_VERSION_CHECK="true" -e BUNDLE_MIRROR__RUBYGEMS__ORG=${BUNDLE_MIRROR__RUBYGEMS__ORG} -e PIP_INDEX_URL=${PIP_INDEX_URL} - -e API_KEY_ORG2_SSM_NAME=${API_KEY_ORG2_SSM_NAME} + -e API_KEY_ORG2=${API_KEY_ORG2} 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} c:\mnt\tasks\winbuildscripts\buildwin.bat - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } @@ -123,7 +123,7 @@ windows_zip_agent_binaries_x64-a7: -e USE_S3_CACHING="$USE_S3_CACHING" -e BUNDLE_MIRROR__RUBYGEMS__ORG=${BUNDLE_MIRROR__RUBYGEMS__ORG} -e PIP_INDEX_URL=${PIP_INDEX_URL} - -e API_KEY_ORG2_SSM_NAME=${API_KEY_ORG2_SSM_NAME} + -e API_KEY_ORG2=${API_KEY_ORG2} 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} c:\mnt\tasks\winbuildscripts\buildwin.bat - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } diff --git a/.gitlab/packaging/deb.yml b/.gitlab/packaging/deb.yml index cd136706ff1e6..b45aad5855716 100644 --- a/.gitlab/packaging/deb.yml +++ b/.gitlab/packaging/deb.yml @@ -2,7 +2,6 @@ .package_deb_common: stage: packaging script: - - source /root/.bashrc - !reference [.cache_omnibus_ruby_deps, setup] - echo "About to package for $RELEASE_VERSION" - !reference [.setup_deb_signing_key] @@ -18,6 +17,7 @@ KUBERNETES_CPU_REQUEST: 16 KUBERNETES_MEMORY_REQUEST: "32Gi" KUBERNETES_MEMORY_LIMIT: "32Gi" + PACKAGE_REQUIRED_FILES_LIST: "test/required_files/agent-deb.txt" cache: - !reference [.cache_omnibus_ruby_deps, cache] @@ -88,7 +88,6 @@ agent_deb-arm64-a7: .package_ot_deb_common: extends: [.package_deb_common] script: - - source /root/.bashrc - !reference [.cache_omnibus_ruby_deps, setup] - echo "About to package for $RELEASE_VERSION" - !reference [.setup_deb_signing_key] @@ -125,6 +124,9 @@ installer_deb-amd64: variables: DESTINATION_DEB: "datadog-installer_7_amd64.deb" DD_PROJECT: "installer" + # There are currently no files to check for in the installer so we + # explicitly disable the check + PACKAGE_REQUIRED_FILES_LIST: "" installer_deb-arm64: extends: [.package_deb_common, .package_deb_arm64, .package_deb_agent_7] @@ -135,6 +137,7 @@ installer_deb-arm64: variables: DESTINATION_DEB: "datadog-installer_7_arm64.deb" DD_PROJECT: "installer" + PACKAGE_REQUIRED_FILES_LIST: "" .package_iot_deb_common: extends: [.package_deb_agent_7] @@ -143,7 +146,6 @@ installer_deb-arm64: - when: on_success stage: packaging script: - - source /root/.bashrc - !reference [.cache_omnibus_ruby_deps, setup] - echo "About to package for $RELEASE_VERSION" - !reference [.setup_deb_signing_key] @@ -159,6 +161,7 @@ installer_deb-arm64: KUBERNETES_MEMORY_REQUEST: "32Gi" KUBERNETES_MEMORY_LIMIT: "32Gi" OMNIBUS_PACKAGE_ARTIFACT_DIR: $OMNIBUS_PACKAGE_DIR + PACKAGE_REQUIRED_FILES_LIST: "test/required_files/iot-agent-deb.txt" cache: - !reference [.cache_omnibus_ruby_deps, cache] @@ -194,6 +197,7 @@ dogstatsd_deb-x64: variables: DD_PROJECT: dogstatsd DESTINATION_DEB: "datadog-dogstatsd_amd64.deb" + PACKAGE_REQUIRED_FILES_LIST: "test/required_files/dogstatsd-deb.txt" 
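PACKAGE_REQUIRED_FILES_LIST points each packaging job at a list of paths that must be present in the produced package, and is set to an empty string where the check is deliberately skipped (the installer packages). The check itself is implemented outside this diff; a rough sketch of the idea, assuming one required path per line and using the dogstatsd deb named above:

    pkg="$OMNIBUS_PACKAGE_DIR/datadog-dogstatsd_amd64.deb"
    list="test/required_files/dogstatsd-deb.txt"
    if [ -n "$list" ]; then
      contents=$(dpkg -c "$pkg")          # list the files shipped in the package
      while read -r required; do
        [ -z "$required" ] && continue    # skip blank lines
        echo "$contents" | grep -qF "$required" || { echo "missing: $required"; exit 1; }
      done < "$list"
    fi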
dogstatsd_deb-arm64: extends: [.package_deb_common, .package_deb_arm64, .package_deb_agent_7] @@ -204,4 +208,5 @@ dogstatsd_deb-arm64: variables: DD_PROJECT: dogstatsd DESTINATION_DEB: "datadog-dogstatsd_arm64.deb" + PACKAGE_REQUIRED_FILES_LIST: "test/required_files/dogstatsd-deb.txt" diff --git a/.gitlab/packaging/oci.yml b/.gitlab/packaging/oci.yml index 4598fa8050336..0efb48a3e6e0e 100644 --- a/.gitlab/packaging/oci.yml +++ b/.gitlab/packaging/oci.yml @@ -6,14 +6,14 @@ image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] before_script: - - source /root/.bashrc - - export PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7)-1 + - PACKAGE_VERSION="$(inv agent.version --url-safe --major-version 7)-1" || exit $? - export INSTALL_DIR=/opt/datadog-packages/${OCI_PRODUCT}/${PACKAGE_VERSION} variables: KUBERNETES_CPU_REQUEST: 16 KUBERNETES_MEMORY_REQUEST: "32Gi" KUBERNETES_MEMORY_LIMIT: "32Gi" script: + - !reference [.retrieve_linux_go_tools_deps] - rm -f $OMNIBUS_PACKAGE_DIR/*-dbg-*.tar.xz - ls -l $OMNIBUS_PACKAGE_DIR - python3 -m pip install -r tasks/libs/requirements-github.txt @@ -68,12 +68,12 @@ agent_oci: extends: .package_oci - needs: ["datadog-agent-oci-x64-a7", "datadog-agent-oci-arm64-a7", "windows_msi_and_bosh_zip_x64-a7"] + needs: ["datadog-agent-oci-x64-a7", "datadog-agent-oci-arm64-a7", "windows_msi_and_bosh_zip_x64-a7", "go_tools_deps"] variables: OCI_PRODUCT: "datadog-agent" installer_oci: extends: .package_oci - needs: ["installer-arm64-oci", "installer-amd64-oci", "windows-installer-amd64"] + needs: ["installer-arm64-oci", "installer-amd64-oci", "windows-installer-amd64", "go_tools_deps"] variables: OCI_PRODUCT: "datadog-installer" diff --git a/.gitlab/packaging/rpm.yml b/.gitlab/packaging/rpm.yml index f337dc8124924..7ab26f7d86e9b 100644 --- a/.gitlab/packaging/rpm.yml +++ b/.gitlab/packaging/rpm.yml @@ -5,13 +5,12 @@ - !reference [.except_mergequeue] - when: on_success before_script: - - source /root/.bashrc script: - echo "About to build for $RELEASE_VERSION" - !reference [.cache_omnibus_ruby_deps, setup] - - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY_SSM_NAME) - - printf -- "$RPM_GPG_KEY" | gpg --import --batch - - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE_SSM_NAME) + - printf -- "$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_GPG_KEY)" | gpg --import --batch + - EXIT="${PIPESTATUS[0]}"; if [ $EXIT -ne 0 ]; then echo "Unable to locate credentials needs gitlab runner restart"; exit $EXIT; fi + - RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_SIGNING_PASSPHRASE) || exit $?; export RPM_SIGNING_PASSPHRASE - inv -e omnibus.build --release-version "$RELEASE_VERSION" --major-version "$AGENT_MAJOR_VERSION" --base-dir $OMNIBUS_BASE_DIR --skip-deps --target-project=${DD_PROJECT} ${OMNIBUS_EXTRA_ARGS} - ls -la $OMNIBUS_PACKAGE_DIR/ - !reference [.lint_linux_packages] @@ -25,6 +24,7 @@ KUBERNETES_MEMORY_REQUEST: "32Gi" KUBERNETES_MEMORY_LIMIT: "32Gi" OMNIBUS_PACKAGE_ARTIFACT_DIR: $OMNIBUS_PACKAGE_DIR + PACKAGE_REQUIRED_FILES_LIST: "test/required_files/agent-rpm.txt" cache: - !reference [.cache_omnibus_ruby_deps, cache] @@ -112,36 +112,41 @@ installer_rpm-amd64: needs: ["installer-amd64"] variables: DD_PROJECT: installer + # There are currently no files to check for in the installer so we + # explicitly disable the check + 
PACKAGE_REQUIRED_FILES_LIST: "" installer_rpm-arm64: extends: [.package_rpm_common, .package_rpm_agent_7, .package_rpm_arm64] needs: ["installer-arm64"] variables: DD_PROJECT: installer + PACKAGE_REQUIRED_FILES_LIST: "" installer_suse_rpm-amd64: extends: [.package_suse_rpm_common, .package_rpm_agent_7, .package_rpm_x86] needs: ["installer-amd64"] variables: DD_PROJECT: installer + PACKAGE_REQUIRED_FILES_LIST: "" installer_suse_rpm-arm64: extends: [.package_suse_rpm_common, .package_rpm_agent_7, .package_rpm_arm64] needs: ["installer-arm64"] variables: DD_PROJECT: installer + PACKAGE_REQUIRED_FILES_LIST: "" .package_iot_rpm_common: rules: - !reference [.except_mergequeue] - when: on_success script: - - source /root/.bashrc - echo "About to build for $RELEASE_VERSION" - !reference [.cache_omnibus_ruby_deps, setup] - - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY_SSM_NAME) - - printf -- "$RPM_GPG_KEY" | gpg --import --batch - - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE_SSM_NAME) + - printf -- "$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_GPG_KEY)" | gpg --import --batch + - EXIT="${PIPESTATUS[0]}"; if [ $EXIT -ne 0 ]; then echo "Unable to locate credentials needs gitlab runner restart"; exit $EXIT; fi + - RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_SIGNING_PASSPHRASE) || exit $?; export RPM_SIGNING_PASSPHRASE - inv -e omnibus.build --release-version "$RELEASE_VERSION" --base-dir $OMNIBUS_BASE_DIR --skip-deps --flavor=iot ${OMNIBUS_EXTRA_ARGS} - ls -la $OMNIBUS_PACKAGE_DIR/ - !reference [.lint_linux_packages] @@ -156,6 +161,7 @@ installer_suse_rpm-arm64: KUBERNETES_MEMORY_LIMIT: "32Gi" OMNIBUS_PACKAGE_ARTIFACT_DIR: $OMNIBUS_PACKAGE_DIR RELEASE_VERSION: $RELEASE_VERSION_7 + PACKAGE_REQUIRED_FILES_LIST: "test/required_files/iot-agent-rpm.txt" cache: - !reference [.cache_omnibus_ruby_deps, cache] @@ -203,9 +209,11 @@ dogstatsd_rpm-x64: needs: ["dogstatsd-x64"] variables: DD_PROJECT: dogstatsd + PACKAGE_REQUIRED_FILES_LIST: "test/required_files/dogstatsd-rpm.txt" dogstatsd_suse-x64: extends: [.package_suse_rpm_common, .package_rpm_agent_7, .package_rpm_x86] needs: ["dogstatsd-x64"] variables: DD_PROJECT: dogstatsd + PACKAGE_REQUIRED_FILES_LIST: "test/required_files/dogstatsd-rpm.txt" diff --git a/.gitlab/pkg_metrics/pkg_metrics.yml b/.gitlab/pkg_metrics/pkg_metrics.yml index 8ff246b8c2667..21c647bbcd805 100644 --- a/.gitlab/pkg_metrics/pkg_metrics.yml +++ b/.gitlab/pkg_metrics/pkg_metrics.yml @@ -56,10 +56,8 @@ send_pkg_size: - job: iot_agent_suse-x64 optional: true script: - - source /root/.bashrc - # Get API key to send metrics - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY # Allow failures: some packages are not always built, and therefore stats cannot be sent for them - set +e @@ -107,12 +105,9 @@ send_pkg_size: image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] script: - - source /root/.bashrc - - ls -l $OMNIBUS_PACKAGE_DIR - if [[ "${ARCH}" == "amd64" ]]; then ls -l $OMNIBUS_PACKAGE_DIR_SUSE; fi - - source /root/.bashrc - export failures=0 - export last_stable=$(inv release.get-release-json-value "last_stable::${MAJOR_VERSION}") # Get stable packages from S3 buckets, send new package sizes & compare stable 
and new package sizes diff --git a/.gitlab/post_rc_build/post_rc_tasks.yml b/.gitlab/post_rc_build/post_rc_tasks.yml index 00efc95005fa5..f02bda3516650 100644 --- a/.gitlab/post_rc_build/post_rc_tasks.yml +++ b/.gitlab/post_rc_build/post_rc_tasks.yml @@ -11,9 +11,8 @@ update_rc_build_links: image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] script: - - source /root/.bashrc - - export ATLASSIAN_PASSWORD=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $JIRA_READ_API_TOKEN_SSM_NAME) - - export ATLASSIAN_USERNAME=robot-jira-agentplatform@datadoghq.com + - ATLASSIAN_PASSWORD=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $ATLASSIAN_WRITE token) || exit $?; export ATLASSIAN_PASSWORD + - ATLASSIAN_USERNAME=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $ATLASSIAN_WRITE user) || exit $?; export ATLASSIAN_USERNAME - python3 -m pip install -r tasks/requirements_release_tasks.txt - PATCH=$(echo "$CI_COMMIT_REF_NAME" | cut -d'.' -f3 | cut -c1) - if [[ "$PATCH" == "0" ]]; then PATCH_OPTION=""; else PATCH_OPTION="-p"; fi diff --git a/.gitlab/powershell_script_deploy/powershell_script_deploy.yml b/.gitlab/powershell_script_deploy/powershell_script_deploy.yml new file mode 100644 index 0000000000000..53fce34835b75 --- /dev/null +++ b/.gitlab/powershell_script_deploy/powershell_script_deploy.yml @@ -0,0 +1,28 @@ +# We could (should?) piggy back on deploy_installer_packages_windows-x64 to also deploy this +# script to $S3_RELEASE_INSTALLER_ARTIFACTS_URI and have the agent-release-management repository +# publish it to a production bucket like ddagent-windows-stable. +# For now we can use the dd-agent-mstesting bucket to store the PowerShell script. +powershell_script_deploy: + image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/gitlab_agent_deploy$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + tags: ["arch:amd64"] + stage: choco_and_install_script_deploy + rules: + !reference [.manual] + needs: ["powershell_script_signing"] + script: + - ls $WINDOWS_POWERSHELL_DIR + - $S3_CP_CMD $WINDOWS_POWERSHELL_DIR/Install-Datadog.ps1 s3://dd-agent-mstesting/Install-Datadog.ps1 --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732 + +# Technically deploy_installer_packages_windows-x64 also uploads the bootstrapper to $S3_RELEASE_INSTALLER_ARTIFACTS_URI +# but it requires changes in agent-release-management to deploy to a production bucket like ddagent-windows-stable. +# For now we can use the dd-agent-mstesting bucket to store the bootstrapper. 
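Both new deploy jobs publish world-readable objects to the dd-agent-mstesting bucket. S3_CP_CMD is defined outside this diff; assuming it wraps aws s3 cp, the PowerShell script upload is roughly equivalent to:

    # read for the global AllUsers group makes the object publicly downloadable,
    # while the canonical-ID grant keeps full control with the owning account.
    aws s3 cp "$WINDOWS_POWERSHELL_DIR/Install-Datadog.ps1" \
      s3://dd-agent-mstesting/Install-Datadog.ps1 \
      --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers \
               full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732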
+windows_bootstrapper_deploy: + image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/gitlab_agent_deploy$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + tags: ["arch:amd64"] + stage: choco_and_install_script_deploy + rules: + !reference [.manual] + needs: ["windows-installer-amd64"] + script: + - ls $OMNIBUS_PACKAGE_DIR + - $S3_CP_CMD $OMNIBUS_PACKAGE_DIR/datadog-installer-*-1-x86_64.exe s3://dd-agent-mstesting/datadog-installer-x86_64.exe --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732 diff --git a/.gitlab/powershell_script_signing/powershell_script_signing.yml b/.gitlab/powershell_script_signing/powershell_script_signing.yml new file mode 100644 index 0000000000000..c2985179eee4c --- /dev/null +++ b/.gitlab/powershell_script_signing/powershell_script_signing.yml @@ -0,0 +1,16 @@ +powershell_script_signing: + tags: ["runner:windows-docker", "windowsversion:1809"] + stage: choco_and_install_script_build + needs: [] + variables: + ARCH: "x64" + rules: + !reference [.manual] + artifacts: + expire_in: 2 weeks + paths: + - $WINDOWS_POWERSHELL_DIR + script: + - mkdir $WINDOWS_POWERSHELL_DIR + - docker run --rm -v "$(Get-Location):c:\mnt" -e AWS_NETWORKING=true -e IS_AWS_CONTAINER=true 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} powershell -C "dd-wcs sign \mnt\tools\windows\DatadogAgentInstallScript\Install-Datadog.ps1" + - copy .\tools\windows\DatadogAgentInstallScript\Install-Datadog.ps1 $WINDOWS_POWERSHELL_DIR\Install-Datadog.ps1 diff --git a/.gitlab/setup/setup.yml b/.gitlab/setup/setup.yml index 3ef2bdd4dfeb0..28779481845c8 100644 --- a/.gitlab/setup/setup.yml +++ b/.gitlab/setup/setup.yml @@ -4,8 +4,7 @@ setup_agent_version: image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] script: - - source /root/.bashrc - - inv -e agent.version --cache-version + - inv -e agent.version --cache-version || exit $? 
- $S3_CP_CMD $CI_PROJECT_DIR/agent-version.cache $S3_ARTIFACTS_URI/agent-version.cache needs: [] @@ -17,18 +16,17 @@ github_rate_limit_info: - !reference [.except_mergequeue] - when: on_success script: - - source /root/.bashrc - python3 -m pip install -r tasks/libs/requirements-github.txt datadog_api_client # Send stats for app 1 - - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_SSM_NAME) - - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_SSM_NAME) - - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_SSM_NAME) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_1 key_b64) || exit $?; export GITHUB_KEY_B64 + - GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_1 app_id) || exit $?; export GITHUB_APP_ID + - GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_1 installation_id) || exit $?; export GITHUB_INSTALLATION_ID + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - inv github.send-rate-limit-info-datadog --pipeline-id $CI_PIPELINE_ID --app-instance 1 # Send stats for app 2 - - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_2_SSM_NAME) - - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_2_SSM_NAME) - - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_2_SSM_NAME) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_2 key_b64) || exit $?; export GITHUB_KEY_B64 + - GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_2 app_id) || exit $?; export GITHUB_APP_ID + - GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_2 installation_id) || exit $?; export GITHUB_INSTALLATION_ID + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - inv github.send-rate-limit-info-datadog --pipeline-id $CI_PIPELINE_ID --app-instance 2 allow_failure: true diff --git a/.gitlab/source_test/ebpf.yml b/.gitlab/source_test/ebpf.yml index 11dfc05354333..012406f1b85aa 100644 --- a/.gitlab/source_test/ebpf.yml +++ b/.gitlab/source_test/ebpf.yml @@ -16,7 +16,6 @@ before_script: - !reference [.retrieve_linux_go_deps] - !reference [.retrieve_linux_go_tools_deps] - - source /root/.bashrc script: - inv -e install-tools - inv -e system-probe.object-files @@ -53,7 +52,6 @@ tests_ebpf_arm64: paths: - $CI_PROJECT_DIR/kmt-deps before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] - !reference [.retrieve_linux_go_tools_deps] - inv -e install-tools @@ -87,7 +85,6 @@ prepare_sysprobe_ebpf_functional_tests_x64: - $CI_PROJECT_DIR/kmt-deps - $DD_AGENT_TESTING_DIR/site-cookbooks/dd-security-agent-check/files before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] - !reference [.retrieve_linux_go_tools_deps] - inv -e install-tools diff --git a/.gitlab/source_test/go_generate_check.yml b/.gitlab/source_test/go_generate_check.yml index ad4af5b2ee47c..51d3293a17370 100644 --- a/.gitlab/source_test/go_generate_check.yml +++ 
b/.gitlab/source_test/go_generate_check.yml @@ -10,7 +10,6 @@ security_go_generate_check: before_script: - !reference [.retrieve_linux_go_deps] - !reference [.retrieve_linux_go_tools_deps] - - source /root/.bashrc - pip3 install wheel - pip3 install -r docs/cloud-workload-security/scripts/requirements-docs.txt - inv -e install-tools diff --git a/.gitlab/source_test/golang_deps_diff.yml b/.gitlab/source_test/golang_deps_diff.yml index 5a01ac2d74a13..6d156dd6f4963 100644 --- a/.gitlab/source_test/golang_deps_diff.yml +++ b/.gitlab/source_test/golang_deps_diff.yml @@ -12,11 +12,10 @@ golang_deps_diff: variables: KUBERNETES_CPU_REQUEST: 4 before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: # Get API key to send metrics - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - inv -e diff.go-deps --report-file=deps-report.md --report-metrics --git-ref "${CI_COMMIT_REF_NAME}" artifacts: paths: @@ -32,6 +31,9 @@ golang_deps_commenter: - !reference [.except_deploy] - when: on_success needs: ["golang_deps_diff"] + variables: + # Not using the entrypoint script for the pr-commenter image + FF_KUBERNETES_HONOR_ENTRYPOINT: false script: # ignore error message about no PR, because it happens for dev branches without PRs - echo "${CI_COMMIT_REF_NAME}" - | @@ -59,11 +61,10 @@ golang_deps_send_count_metrics: - when: on_success needs: ["go_deps"] before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: # Get API key to send metrics - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $API_KEY_ORG2_SSM_NAME) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - inv -e go-deps.send-count-metrics --git-sha "${CI_COMMIT_SHA}" --git-ref "${CI_COMMIT_REF_NAME}" golang_deps_test: @@ -74,7 +75,6 @@ golang_deps_test: - when: on_success needs: ["go_deps"] before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv -e go-deps.test-list diff --git a/.gitlab/source_test/include.yml b/.gitlab/source_test/include.yml index c71029608a40e..60666c8d4f5a1 100644 --- a/.gitlab/source_test/include.yml +++ b/.gitlab/source_test/include.yml @@ -12,5 +12,4 @@ include: - .gitlab/source_test/slack.yml - .gitlab/source_test/golang_deps_diff.yml - .gitlab/source_test/notify.yml - - .gitlab/source_test/technical_linters.yml - .gitlab/source_test/tooling_unit_tests.yml diff --git a/.gitlab/source_test/linux.yml b/.gitlab/source_test/linux.yml index c4d57130833a5..015aea09496a8 100644 --- a/.gitlab/source_test/linux.yml +++ b/.gitlab/source_test/linux.yml @@ -50,8 +50,7 @@ .upload_coverage: # Upload coverage files to Codecov. Never fail on coverage upload. 
- - source /root/.bashrc - - export CODECOV_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $CODECOV_TOKEN_SSM_NAME) + - CODECOV_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $CODECOV_TOKEN) || exit $?; export CODECOV_TOKEN - inv -e coverage.upload-to-codecov $COVERAGE_CACHE_FLAG || true .linux_lint: @@ -65,7 +64,6 @@ script: - !reference [.retrieve_linux_go_deps] - !reference [.retrieve_linux_go_tools_deps] - - source /root/.bashrc && conda activate ddpy3 - inv -e rtloader.make --install-prefix=$CI_PROJECT_DIR/dev --python-runtimes "3" - inv -e rtloader.install - inv -e install-tools @@ -252,7 +250,6 @@ go_mod_tidy_check: extends: .linux_x64 needs: ["go_deps"] before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv -e check-mod-tidy @@ -266,10 +263,17 @@ new-e2e-unit-tests: extends: .linux_tests image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/test-infra-definitions/runner$TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX:$TEST_INFRA_DEFINITIONS_BUILDIMAGES tags: ["arch:amd64"] + needs: + - !reference [ .needs_new_e2e_template ] + - go_deps + - go_tools_deps before_script: + - !reference [.retrieve_linux_go_deps] + - !reference [.retrieve_linux_go_tools_deps] + - !reference [.retrieve_linux_go_e2e_deps] # Setup AWS Credentials - mkdir -p ~/.aws - - $CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $AGENT_QA_PROFILE_SSM_NAME >> ~/.aws/config + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config || exit $? - export AWS_PROFILE=agent-qa-ci # Use S3 backend - pulumi login "s3://dd-pulumi-state?region=us-east-1&awssdk=v2&profile=$AWS_PROFILE" @@ -282,4 +286,6 @@ new-e2e-unit-tests: KUBERNETES_MEMORY_REQUEST: 12Gi KUBERNETES_MEMORY_LIMIT: 16Gi KUBERNETES_CPU_REQUEST: 6 + # Not using the entrypoint script for the e2e runner image + FF_KUBERNETES_HONOR_ENTRYPOINT: false timeout: 10m diff --git a/.gitlab/source_test/macos.yml b/.gitlab/source_test/macos.yml index 76adde53972bf..b23adc6f2158b 100644 --- a/.gitlab/source_test/macos.yml +++ b/.gitlab/source_test/macos.yml @@ -11,7 +11,6 @@ tests_macos: variables: PYTHON_RUNTIMES: "3" script: - - source /root/.bashrc - !reference [.setup_macos_github_app] - $S3_CP_CMD $S3_ARTIFACTS_URI/agent-version.cache . - export VERSION_CACHE_CONTENT=$(cat agent-version.cache | base64 -) @@ -31,25 +30,6 @@ tests_macos: reports: junit: "**/junit-out-*.xml" -lint_macos: - stage: source_test - rules: - - !reference [.except_mergequeue] - - when: on_success - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES - tags: ["arch:amd64"] - needs: ["setup_agent_version"] - variables: - PYTHON_RUNTIMES: "3" - timeout: 6h - script: - - source /root/.bashrc - - !reference [.setup_macos_github_app] - - $S3_CP_CMD $S3_ARTIFACTS_URI/agent-version.cache . 
- - export VERSION_CACHE_CONTENT=$(cat agent-version.cache | base64 -) - - python3 -m pip install -r tasks/libs/requirements-github.txt - - inv -e github.trigger-macos --workflow-type "lint" --datadog-agent-ref "$CI_COMMIT_SHA" --python-runtimes "$PYTHON_RUNTIMES" --version-cache "$VERSION_CACHE_CONTENT" - .macos_gitlab: variables: PYTHON_RUNTIMES: "3" @@ -79,38 +59,64 @@ lint_macos: - pyenv rehash - inv -e rtloader.make --python-runtimes $PYTHON_RUNTIMES - inv -e rtloader.install - - inv -e install-tools - - inv -e deps .lint_macos_gitlab: stage: source_test - allow_failure: true extends: .macos_gitlab - needs: ["setup_agent_version"] + needs: ["go_deps", "go_tools_deps"] script: + - !reference [.retrieve_linux_go_deps] + - !reference [.retrieve_linux_go_tools_deps] - inv -e linter.go --cpus 12 --timeout 60 .tests_macos_gitlab: stage: source_test - allow_failure: true rules: - !reference [.manual] + - !reference [.except_mergequeue] + - when: on_success + allow_failure: true extends: .macos_gitlab - needs: ["setup_agent_version"] + needs: ["go_deps", "go_tools_deps"] variables: TEST_OUTPUT_FILE: test_output.json script: + - !reference [.retrieve_linux_go_deps] + - !reference [.retrieve_linux_go_tools_deps] + - inv -e gitlab.generate-ci-visibility-links --output=$EXTERNAL_LINKS_PATH - FAST_TESTS_FLAG="" - if [[ "$FAST_TESTS" == "true" ]]; then FAST_TESTS_FLAG="--only-impacted-packages"; fi - inv -e test --rerun-fails=2 --python-runtimes $PYTHON_RUNTIMES --race --profile --cpus 12 --save-result-json $TEST_OUTPUT_FILE --junit-tar "junit-${CI_JOB_NAME}.tgz" $FAST_TESTS_FLAG - inv -e invoke-unit-tests + artifacts: + expire_in: 2 weeks + when: always + paths: + - $TEST_OUTPUT_FILE + - junit-*.tgz + reports: + junit: "**/junit-out-*.xml" + annotations: + - $EXTERNAL_LINKS_PATH + +.upload_junit_source: + - $CI_PROJECT_DIR/tools/ci/junit_upload.sh + +.upload_coverage: + # Upload coverage files to Codecov. Never fail on coverage upload. 
+ - CODECOV_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $CODECOV_TOKEN) || exit $?; export CODECOV_TOKEN + - inv -e coverage.upload-to-codecov $COVERAGE_CACHE_FLAG || true + lint_macos_gitlab_amd64: extends: .lint_macos_gitlab tags: ["macos:monterey-amd64", "specific:true"] + rules: + - !reference [.except_mergequeue] + - when: on_success lint_macos_gitlab_arm64: extends: .lint_macos_gitlab + allow_failure: true rules: - !reference [.on_main] - !reference [.manual] @@ -119,7 +125,15 @@ lint_macos_gitlab_arm64: tests_macos_gitlab_amd64: extends: .tests_macos_gitlab tags: ["macos:monterey-amd64", "specific:true"] + after_script: + - !reference [.upload_junit_source] + - !reference [.upload_coverage] tests_macos_gitlab_arm64: extends: .tests_macos_gitlab + rules: + !reference [.manual] tags: ["macos:monterey-arm64", "specific:true"] + after_script: + - !reference [.upload_junit_source] + - !reference [.upload_coverage] diff --git a/.gitlab/source_test/notify.yml b/.gitlab/source_test/notify.yml index aee23f82cfae7..097a12f564aff 100644 --- a/.gitlab/source_test/notify.yml +++ b/.gitlab/source_test/notify.yml @@ -7,7 +7,6 @@ unit_tests_notify: - !reference [.except_disable_unit_tests] - when: always script: - - source /root/.bashrc - python3 -m pip install -r tasks/libs/requirements-github.txt - !reference [.setup_agent_github_app] - inv notify.unit-tests --pipeline-id $CI_PIPELINE_ID --pipeline-url $CI_PIPELINE_URL --branch-name $CI_COMMIT_REF_NAME diff --git a/.gitlab/source_test/slack.yml b/.gitlab/source_test/slack.yml index 920d88bed12ff..5d357a98d2446 100644 --- a/.gitlab/source_test/slack.yml +++ b/.gitlab/source_test/slack.yml @@ -9,6 +9,5 @@ slack_teams_channels_check: - !reference [.except_mergequeue] - when: on_success script: - - source /root/.bashrc - python3 -m pip install codeowners -c tasks/libs/requirements-notifications.txt - inv -e notify.check-teams diff --git a/.gitlab/source_test/technical_linters.yml b/.gitlab/source_test/technical_linters.yml deleted file mode 100644 index 5bf8f5fe25518..0000000000000 --- a/.gitlab/source_test/technical_linters.yml +++ /dev/null @@ -1,27 +0,0 @@ -lint_python: - stage: source_test - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES - tags: ["arch:amd64"] - needs: [] - script: - - source /root/.bashrc - - inv -e linter.python - -lint_update_go: - stage: source_test - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES - tags: ["arch:amd64"] - needs: [] - script: - - source /root/.bashrc - - inv -e linter.update-go - -validate_modules: - stage: source_test - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES - tags: ["arch:amd64"] - needs: [] - script: - - source /root/.bashrc - - inv -e modules.validate - - inv -e modules.validate-used-by-otel diff --git a/.gitlab/source_test/tooling_unit_tests.yml b/.gitlab/source_test/tooling_unit_tests.yml index 25419b084d120..e7a7ab4e1c133 100644 --- a/.gitlab/source_test/tooling_unit_tests.yml +++ b/.gitlab/source_test/tooling_unit_tests.yml @@ -8,7 +8,6 @@ invoke_unit_tests: rules: - !reference [.on_invoke_tasks_changes] script: - - source /root/.bashrc - python3 -m pip install -r tasks/libs/requirements-github.txt - inv -e invoke-unit-tests.run @@ -20,7 +19,6 @@ kitchen_invoke_unit_tests: rules: - 
!reference [.on_kitchen_invoke_tasks_changes] script: - - source /root/.bashrc - python3 -m pip install -r tasks/libs/requirements-github.txt - pushd test/kitchen - inv -e kitchen.invoke-unit-tests diff --git a/.gitlab/source_test/windows.yml b/.gitlab/source_test/windows.yml index f6ff9f865360e..dfa1d0e2aaeb8 100644 --- a/.gitlab/source_test/windows.yml +++ b/.gitlab/source_test/windows.yml @@ -39,11 +39,11 @@ -e EXTRA_OPTS="${FAST_TESTS_FLAG}" -e TEST_WASHER=true -e GO_TEST_SKIP_FLAKE="${GO_TEST_SKIP_FLAKE}" - -e API_KEY_ORG2_SSM_NAME="${API_KEY_ORG2_SSM_NAME}" - -e CODECOV_TOKEN_SSM_NAME="${CODECOV_TOKEN_SSM_NAME}" + -e API_KEY_ORG2="${API_KEY_ORG2}" + -e CODECOV_TOKEN="${CODECOV_TOKEN}" -e S3_PERMANENT_ARTIFACTS_URI="${S3_PERMANENT_ARTIFACTS_URI}" -e COVERAGE_CACHE_FLAG="${COVERAGE_CACHE_FLAG}" - -e GITLAB_TOKEN_SSM_NAME="${GITLAB_READ_API_TOKEN_SSM_NAME}" + -e GITLAB_TOKEN="${GITLAB_READ_API_TOKEN}" 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} c:\mnt\tasks\winbuildscripts\unittests.bat - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } diff --git a/.gitlab/trigger_release/trigger_release.yml b/.gitlab/trigger_release/trigger_release.yml index 11b204e7c7a9c..c0c67e2d50d49 100644 --- a/.gitlab/trigger_release/trigger_release.yml +++ b/.gitlab/trigger_release/trigger_release.yml @@ -18,9 +18,8 @@ script: # agent-release-management creates pipeline for both Agent 6 and Agent 7 # when triggered with major version 7 - - source /root/.bashrc - - export RELEASE_VERSION=$(inv agent.version --major-version 7 --url-safe --omnibus-format)-1 - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME) + - RELEASE_VERSION="$(inv agent.version --major-version 7 --url-safe --omnibus-format)-1" || exit $?; export RELEASE_VERSION + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - 'inv pipeline.trigger-child-pipeline --project-name "DataDog/agent-release-management" --git-ref "main" --variable ACTION --variable AUTO_RELEASE diff --git a/.go-version b/.go-version index 013173af5e9bc..87b26e8b1aa0e 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.22.6 +1.22.7 diff --git a/.wwhrd.yml b/.wwhrd.yml index 84cd078577d5b..5ffc72c725368 100644 --- a/.wwhrd.yml +++ b/.wwhrd.yml @@ -45,4 +45,4 @@ exceptions: additional: # list here paths to additional licenses - golang/go: "raw.githubusercontent.com/golang/go/go1.22.6/LICENSE" + golang/go: "raw.githubusercontent.com/golang/go/go1.22.7/LICENSE" diff --git a/CHANGELOG-DCA.rst b/CHANGELOG-DCA.rst index 7a45c65b38675..786b6903a4c0d 100644 --- a/CHANGELOG-DCA.rst +++ b/CHANGELOG-DCA.rst @@ -2,6 +2,58 @@ Release Notes ============= +.. _Release Notes_7.57.0: + +7.57.0 +====== + +.. _Release Notes_7.57.0_Prelude: + +Prelude +------- + +Released on: 2024-09-09 +Pinned to datadog-agent v7.57.0: `CHANGELOG `_. + +.. _Release Notes_7.57.0_New Features: + +New Features +------------ + +- The Cluster Agent now supports activating Continuous Profiling + using Admission Controller. + +- ``LimitRange`` and ``StorageClass`` resources are now collected by the orchestrator check. + + +.. _Release Notes_7.57.0_Enhancement Notes: + +Enhancement Notes +----------------- + +- The auto-instrumentation webhook (beta) uses a new injector library. + + +.. 
_Release Notes_7.57.0_Bug Fixes: + +Bug Fixes +--------- + +- Fixes a rare bug where some Kubernetes events would be emitted + without a timestamp and would be dropped upstream as a result. + +- Library package versions for auto-instrumentation are now set to the latest major + version of the library-package instead of `latest`. + + * java:v1 + * dotnet:v2 + * python:v2 + * ruby:v2 + * js:v5 + +- Fix APIServer error logs generated when external metrics endpoint is activated + + .. _Release Notes_7.56.2: 7.56.2 diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 10ea12b5fa3ac..abbc7762d316e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,264 @@ Release Notes ============= +.. _Release Notes_7.57.2: + +7.57.2 +====== + +.. _Release Notes_7.57.2_Prelude: + +Prelude +------- + +Release on: 2024-09-24 + + +.. _Release Notes_7.57.2_Enhancement Notes: + +Enhancement Notes +----------------- + +- Agents are now built with Go ``1.22.7``. + + +.. _Release Notes_7.57.2_Bug Fixes: + +Bug Fixes +--------- + +- Fix OOM error with cluster agent auto instrumentation by increasing default memory request from 20Mi to 100Mi. + +- Fixes a panic caused by running the Agent on readonly filesystems. The + Agent returns integration launchers and handles memory gracefully. + + +.. _Release Notes_7.57.1: + +7.57.1 +====== + +.. _Release Notes_7.57.1_Prelude: + +Prelude +------- + +Release on: 2024-09-17 + +- Please refer to the `7.57.1 tag on integrations-core `_ for the list of changes on the Core Checks + +.. _Release Notes_7.57.1_Bug Fixes: + +Bug Fixes +--------- + +- APM: When the UDS listener cannot be created on the trace-agent, the process will log the error, instead of crashing. +- Fixes memory leak caused by container check. + + +.. _Release Notes_7.57.0: + +7.57.0 +====== + +.. _Release Notes_7.57.0_Prelude: + +Prelude +------- + +Release on: 2024-09-09 + +- Please refer to the `7.57.0 tag on integrations-core `_ for the list of changes on the Core Checks + + +.. _Release Notes_7.57.0_Upgrade Notes: + +Upgrade Notes +------------- + +- Update cURL to 8.9.1. + +- Update OpenSSL from 3.0.14 to 3.3.1 (on Linux and macOS). + + +.. _Release Notes_7.57.0_New Features: + +New Features +------------ + +- The `agent diagnose` command now includes a ``--json`` option to output the results in JSON format. + +- Add `integration` value for device metadata. + +- APM: In order to allow for automatic instrumentation to work in Kubernetes + clusters that enforce a ``Restricted`` `Pod Security Standard `_, + which require all containers to explicitly set a ``securityContext``, + an option to configure a `securityContext` to be used for all ``initContainers`` + created by the auto instrumentation has been added. + | This can be done through the ``DD_ADMISSION_CONTROLLER_AUTO_INSTRUMENTATION_INIT_SECURITY_CONTEXT`` + environment value, or ``admission_controller.auto_instrumentation.init_security_context`` configuration - + in both cases a ``json`` string should be supplied. + +- Adds a `kube_runtime_class` tag to metrics associated with Kubernetes + pods and their containers. + +- Expose the Agent's get host tags function to python checks using the new `datadog_agent.get_host_tags` method. + +- Implement static allowlist of Kubernetes events to send by default. + This feature is only enabled when ``filtering_enabled`` is set to + ``true`` in the ``kubernetes_apiserver`` integration configuration. + +- Adds a new launcher to handle incoming logs from integrations. 
+ +- Add optional reverse DNS enrichment of private IP addresses to NDM NetFlow. + +- On Windows, the default value for the service inference feature is now enabled. + + +.. _Release Notes_7.57.0_Enhancement Notes: + +Enhancement Notes +----------------- + +- Turn on Orchestrator Explorer by default in the core agent + +- Added new source_host tag to TCP/UDP logs to help users understand where their logs came from. + +- Added support for handling UDP/TCP Logs when running the containerized agent. + +- APM: Allow custom HTTP client to be provided when instantiating the + trace-agent configuration. This feature is primarily intended for the + OpenTelemetry exporter. + +- APM: Add default UDS listeners for traces (trace-agent) and + dogstatsd (core-agent) on /var/run/datadog/apm.socket and + /var/run/datadog/dsd.socket, respectively. + These are used in the Single Step APM Instrumentation, improving + the onboarding experience and minimizing the agent configuration. + +- For the [Inferred Service Dependencies beta](https://docs.datadoghq.com/tracing/guide/inferred-service-opt-in/?tab=java), add two new `peer.hostname` precursor attributes, `out.host` and `dns.hostname`. This will improve coverage of inferred services because some tracer integrations only place the peer hostname in one of those attributes. + +- APM stats for internal service overrides are now aggregated by the `_dd.base_service` tag only, enhancing visibility into specific base services. + +- Include spans with `span.kind=consumer` for aggregation of + stats on peer tags. + +- IP address quantization on all peer tags is done in the backend during ingestion. This change updates the Agent to apply the same IP address quantization. This reduces unnecessary aggregation that is currently done on raw IP addresses, and therefore improves the aggregation performance of stats on peer tags. + +- APM: Add new setting to disable the HTTP receiver in the + trace-agent. This setting should almost never be disabled and + is only a convenience parameter for OpenTelemetry extensions. + Disabling the receiver is semantically equivalent to setting the + receiver_port to 0 and receiver_socket to "". + +- Agents are now built with Go ``1.22.6``. + +- [NDM] Adds the option to collect BGP neighbors metrics from Cisco SD-WAN. + +- [NDM] Add option to collect cloud application metrics from Cisco SD-WAN. + +- [Cisco SD-WAN] Allow enabling/disabling metrics collection. + +- Report the hostname of Kubernetes events based on the associated + pod that the event relates to. + +- Introduces a parser to extract tags from integration logs and attach them to outgoing logs. + +- Implement External Data environment variable injection in the Admission Controller. + Format for this new environment variable is `it-INIT_CONTAINER,cn-CONTAINER_NAME,pu-POD_UID`. + This new variable is needed for the New Origin Detection spec. It is used for Origin Detection + in case Local Data are unavailable, for example with Kata Containers and CGroups v2. + +- Upgraded JMXFetch to `0.49.3 `_ which adds support for jsr77 j2ee statistics + and custom ConnectionFactory. See `0.49.3 `_ for more details. + +- Windows Agent Installer gives a better error message when a gMSA + account is provided for ``ddagentuser`` that Windows does not recognize. + +- Uninstalling the Windows Agent MSI Installer removes specific + subdirectories of the install path to help prevent data loss when + ``PROJECTLOCATION`` is misconfigured to an existing directory. 
+ +- Adds a default upper limit of 10000 to the number of network traffic + paths that are captured at a single time. The user can increase or + decrease this limit as needed. + +- Language detection can run on the core Agent without needing a gRPC server. + +- Add Hostname and ExtraTags to `CollectorECSTask`. + +- Collect SystemInfo for Pods and ECS Tasks. + +- Implement API that allows Python checks to send logs for + eventual submission. + +- Users can use ``DD_ORCHESTRATOR_EXPLORER_CUSTOM_SENSITIVE_ANNOTATIONS_LABELS`` to remove sensitive annotations and labels. + For example: ``DD_ORCHESTRATOR_EXPLORER_CUSTOM_SENSITIVE_ANNOTATIONS_LABELS="sensitive-key-1 sensitive-key-2"``. + Keys should be separated by spaces. The agent removes any annotations and labels matching these keys. + +- Add the ability to tag interface metrics with user-defined tags. + + +.. _Release Notes_7.57.0_Security Notes: + +Security Notes +-------------- + +- Fix CVE-2024-41110. + + +.. _Release Notes_7.57.0_Bug Fixes: + +Bug Fixes +--------- + +- Results of `agent config` did not reflect the actual runtime config for the other services. This will have other Datadog Agent services (e.g. trace-agent) running as a systemd service read the same environment variables from a text file `/etc/datadog-agent/environment` as the core Agent process. + +- [DBM] Bump go-sqllexer to 0.0.13 to fix a bug where the table name is incorrectly collected on PostgreSQL SELECT ONLY statement. + +- [Cisco SD-WAN] Do not collect unspecified IP addresses. + +- Fix `container.net.*` metrics accuracy on Linux. Currently `container.net.*` metrics are always emitted with high cardinality tags while the values may not represent actual container-level values but POD-level values (multiple containers in a pod) or host-level values (containers running in host network). With this bug fix, the `container.net.*` metrics aren't emitted for containers running in host network and a single timeseries is emitted by pods when running multiple containers. Finally, in non-Kubernetes environments, if multiple containers share the same network namespace, `container.net.*` metrics won't be emitted. + +- Fix duplicate logging in Process Agent component's Enabled() method. + +- Fixed bug in kubelet check when running in core agent that + was causing `kubernetes.kubelet.container.log_filesystem.used_bytes` + to be reported by the check for excluded/non-existing containers. + The metric was being reported in this case without tags. + This bug does not exist in the python integration version of the + kubelet check. + +- Fixes a bug on Windows in the driver installation custom actions that could prevent rollback from working properly if an installation failed or was canceled. + +- Update pro-bing library to include fix for a Windows specific issue with large ICMP packets + +- [oracle] Fix wrong durations for cloud databases. + +- Stop chunking outputs in manual checks for container, process, and process_discovery checks to allow JSON unmarshaler to parse output. + +- Remove the original pod annotation on consul + +- Fix pod status for pods using native sidecars. + +- Fix a regression where the Agent would fail to start on systems with SysVinit. + +- APM: Fixes issue where the number of HTTP decoders was incorrectly set if setting GOMAXPROCS to milli-cpu values. + + +.. _Release Notes_7.57.0_Other Notes: + +Other Notes +----------- + +- Add metrics origins for vLLM integration. + +- Add deprecation warnings when running process checks on the Process Agent in Linux. 
+ This change prepares for the deprecation of processes and container collection in the Process Agent, occurring in a future release. + +- Add metric origin for the AWS Neuron integration + + .. _Release Notes_7.56.2: 7.56.2 @@ -78,7 +336,7 @@ Upgrade Notes New Features ------------ -- The core Agent now supports multiple configuration files in addition to the main ``datadog.yaml`` file. +- The core Agent now supports multiple configuration files in addition to the main ``datadog.yaml`` file. The -E flag can be used to specify additional configuration files to be loaded sequentially after the main ``datadog.yaml``. - When ``DD_SERVERLESS_STREAM_LOGS`` is enabled, DD_EXTENSION @@ -102,10 +360,10 @@ Enhancement Notes - APM: Add obfuscation support for OpenSearch statements within span metadata. This feature works in the same way as the existing Elasticsearch one, and is enabled by default. It is configured by binding ``apm_config.obfuscation.opensearch.*`` parameters to new obfuscation environment variables. In particular, bind: ``apm_config.obfuscation.opensearch.enabled`` to ``DD_APM_OBFUSCATION_OPENSEARCH_ENABLED``: It accepts a boolean value with default value true. - + ``apm_config.obfuscation.opensearch.keep_values`` to ``DD_APM_OBFUSCATION_OPENSEARCH_KEEP_VALUES`` It accepts a list of strings of the form ``["id1", "id2"]``. - + ``apm_config.obfuscation.opensearch.obfuscate_sql_values`` to ``DD_APM_OBFUSCATION_OPENSEARCH_OBFUSCATE_SQL_VALUES`` It accepts a list of strings of the form ``["key1", "key2"]``. @@ -145,7 +403,7 @@ Enhancement Notes to better support users with multi-byte character sets, for example, Korean, Arabic, etc. This should alleviate crashes caused by long queries using these characters. -- The OTLP ingestion endpoint now supports the same settings and protocol as +- The OTLP ingestion endpoint now supports the same settings and protocol as the OpenTelemetry Collector OTLP receiver v0.103.0. - APM: Probabilistic Sampler now only looks at the lower 64 bits of a trace ID by default to improve compatibility in distributed systems where some apps may truncate the trace ID. To maintain the previous behavior use the feature flag `probabilistic_sampler_full_trace_id`. @@ -188,7 +446,7 @@ Deprecation Notes Security Notes -------------- -- Updating OpenSSL to 3.0.14 to address CVE-2024-4741. +- Updating OpenSSL to 3.0.14 to address CVE-2024-4741 (on Linux and macOS). .. _Release Notes_7.56.0_Bug Fixes: @@ -204,7 +462,7 @@ Bug Fixes - Re-enable printing of checks metadata in the ``datadog-agent status`` collector section. -- Fix OTLP status output not being displayed in the GUI. +- Fix OTLP status output not being displayed in the GUI. - Fix issue where init config for ping took priority over instance config. 
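For reference, the OpenSearch obfuscation settings named in the 7.56.0 enhancement note above map onto `datadog.yaml` roughly as follows. This is a minimal sketch, not authoritative documentation: the key nesting follows the `apm_config.obfuscation.opensearch.*` paths given in the note, and the list values are illustrative placeholders taken from it.

```yaml
# Sketch of the OpenSearch obfuscation settings from the 7.56.0 note above.
# Each key also binds to an environment variable:
#   DD_APM_OBFUSCATION_OPENSEARCH_ENABLED
#   DD_APM_OBFUSCATION_OPENSEARCH_KEEP_VALUES
#   DD_APM_OBFUSCATION_OPENSEARCH_OBFUSCATE_SQL_VALUES
apm_config:
  obfuscation:
    opensearch:
      enabled: true                          # boolean, defaults to true
      keep_values: ["id1", "id2"]            # values left unobfuscated
      obfuscate_sql_values: ["key1", "key2"] # keys whose SQL values are obfuscated
```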
diff --git a/Dockerfiles/agent-ot/Dockerfile.agent-otel b/Dockerfiles/agent-ot/Dockerfile.agent-otel index cb0fb6109e780..cf314ed6f47cc 100644 --- a/Dockerfiles/agent-ot/Dockerfile.agent-otel +++ b/Dockerfiles/agent-ot/Dockerfile.agent-otel @@ -1,9 +1,11 @@ -ARG AGENT_VERSION=nightly-ot-beta-main-jmx +ARG AGENT_VERSION=7.57.0-v1.0-ot-beta-jmx +ARG AGENT_BRANCH=7.57.x-otel-beta-v1 # Use the Ubuntu Slim AMD64 base image FROM ubuntu:24.04 AS builder # Set environment variables ARG AGENT_VERSION +ARG AGENT_BRANCH ENV DEBIAN_FRONTEND=noninteractive # Set the working directory @@ -24,7 +26,7 @@ RUN apt-get update && \ && rm -rf /var/lib/apt/lists/* # TEMP: Use github source code -RUN git clone --depth 1 https://github.com/DataDog/datadog-agent.git datadog-agent-${AGENT_VERSION} +RUN git clone --depth 1 -b "${AGENT_BRANCH}" --single-branch https://github.com/DataDog/datadog-agent.git datadog-agent-${AGENT_VERSION} # Once we have stable releases, we can use the following code to download the source code # TODO: use released agent version once we have an agent release with the otel binary @@ -77,7 +79,7 @@ RUN . venv/bin/activate && invoke collector.generate RUN . venv/bin/activate && invoke otel-agent.build # Use the final Datadog agent image -FROM datadog/agent-dev:${AGENT_VERSION} +FROM datadog/agent:${AGENT_VERSION} ARG AGENT_VERSION # Copy the built OTel agent from the builder stage COPY --from=builder /workspace/datadog-agent-${AGENT_VERSION}/bin/otel-agent/otel-agent /opt/datadog-agent/embedded/bin/otel-agent diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 219896d013391..5a421c0fab4a2 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -591,7 +591,6 @@ core,github.com/aws/aws-sdk-go-v2/service/sts,Apache-2.0,"Copyright 2014-2015 St core,github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go-v2/service/sts/types,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/aws,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." -core,github.com/aws/aws-sdk-go/aws/arn,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/aws/auth/bearer,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/aws/awserr,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/aws/awsutil,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." @@ -613,9 +612,6 @@ core,github.com/aws/aws-sdk-go/aws/session,Apache-2.0,"Copyright 2014-2015 Strip core,github.com/aws/aws-sdk-go/aws/signer/v4,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/internal/context,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/internal/ini,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." 
-core,github.com/aws/aws-sdk-go/internal/s3shared,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." -core,github.com/aws/aws-sdk-go/internal/s3shared/arn,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." -core,github.com/aws/aws-sdk-go/internal/s3shared/s3err,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/internal/sdkio,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/internal/sdkmath,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/internal/sdkrand,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." @@ -623,24 +619,19 @@ core,github.com/aws/aws-sdk-go/internal/sdkuri,Apache-2.0,"Copyright 2014-2015 S core,github.com/aws/aws-sdk-go/internal/shareddefaults,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/internal/strings,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/internal/sync/singleflight,BSD-3-Clause,"Copyright (c) 2009 The Go Authors. All rights reserved | Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." -core,github.com/aws/aws-sdk-go/private/checksum,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/private/protocol,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/private/protocol/ec2query,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." -core,github.com/aws/aws-sdk-go/private/protocol/eventstream,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." -core,github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/private/protocol/json/jsonutil,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/private/protocol/jsonrpc,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/private/protocol/query,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/private/protocol/query/queryutil,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/private/protocol/rest,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." 
core,github.com/aws/aws-sdk-go/private/protocol/restjson,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." -core,github.com/aws/aws-sdk-go/private/protocol/restxml,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/service/ec2,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/service/ec2/ec2iface,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/service/ecs,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/service/lightsail,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." -core,github.com/aws/aws-sdk-go/service/s3,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/service/sso,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/service/sso/ssoiface,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/service/ssooidc,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." @@ -676,7 +667,6 @@ core,github.com/bahlo/generic-list-go,BSD-3-Clause,Copyright (c) 2009 The Go Aut core,github.com/beevik/ntp,BSD-2-Clause,Al Cutter (AlCutter) | Andrey Smirnov (smira) | Anton Tolchanov (knyar) | Ask Bjørn Hansen (abh) | Brett Vickers (beevik) | Christopher Batey (chbatey) | Copyright © 2015-2023 Brett Vickers. All rights reserved | Leonid Evdokimov (darkk) | Meng Zhuo (mengzhuo) | Mikhail Salosin (AlphaB) | Silves-Xiang (silves-xiang) core,github.com/benbjohnson/clock,MIT,Copyright (c) 2014 Ben Johnson core,github.com/beorn7/perks/quantile,MIT,Copyright (C) 2013 Blake Mizerany -core,github.com/bgentry/go-netrc/netrc,MIT,Copyright © 2010 Fazlul Shahriar . Newer | Copyright © 2014 Blake Gentry core,github.com/bhmj/jsonslice,MIT,Copyright (c) 2018 bhmj core,github.com/bitnami/go-version/pkg/version,Apache-2.0,Copyright (c) 2023-2024 Carlos Rodríguez Hernández core,github.com/blabber/go-freebsd-sysctl/sysctl,0BSD,Copyright (c) 2014-2020 by Tobias Rehbein @@ -1055,6 +1045,7 @@ core,github.com/go-redis/redis/v9/internal/util,BSD-2-Clause,Copyright (c) 2013 core,github.com/go-resty/resty/v2,MIT,"Copyright (c) 2015-2023 Jeevanandam M., https://myjeeva.com " core,github.com/go-sql-driver/mysql,MPL-2.0,"Aaron Hopkins | Achille Roussel | Aidan | Alex Snast | Alexey Palazhchenko | Andrew Reid | Animesh Ray | Ariel Mashraki | Arne Hormann | Asta Xie | Barracuda Networks, Inc. | Brian Hendriks | Bulat Gaifullin | Caine Jette | Carlos Nieto | Chris Kirkland | Chris Moos | Counting Ltd. | Craig Wilson | Daemonxiao <735462752 at qq.com> | Daniel Montoya | Daniel Nichter | Daniël van Eeden | Dave Protasowski | DigitalOcean Inc. | DisposaBoy | Dolthub Inc. 
| Egor Smolyakov | Erwan Martin | Evan Elias | Evan Shaw | Facebook Inc. | Frederick Mayle | GitHub Inc. | Google Inc. | Gustavo Kristic | Gusted | Hajime Nakagami | Hanno Braun | Henri Yandell | Hirotaka Yamamoto | Huyiguang | ICHINOSE Shogo | INADA Naoki | Ilia Cimpoes | InfoSum Ltd. | Jacek Szwec | James Harr | Janek Vedock | Jason Ng | Jean-Yves Pellé | Jeff Hodges | Jeffrey Charles | Jennifer Purevsuren | Jerome Meyer | Jiajia Zhong | Jian Zhen | Joshua Prunier | Julien Lefevre | Julien Schmidt | Justin Li | Justin Nuß | Kamil Dziedzic | Kei Kamikawa | Kevin Malachowski | Keybase Inc. | Kieron Woodhouse | Lance Tian | Lennart Rudolph | Leonardo YongUk Kim | Linh Tran Tuan | Lion Yang | Luca Looz | Lucas Liu | Luke Scott | Lunny Xiao | Maciej Zimnoch | Michael Woolnough | Microsoft Corp. | Multiplay Ltd. | Nathanial Murphy | Nicola Peduzzi | Oliver Bone | Olivier Mengué | Paul Bonser | Paulius Lozys | Percona LLC | Peter Schultz | Phil Porada | PingCAP Inc. | Pivotal Inc. | Rebecca Chin | Reed Allman | Richard Wilkes | Robert Russell | Runrioter Wung | Samantha Frank | Santhosh Kumar Tekuri | Shattered Silicon Ltd. | Sho Iizuka | Sho Ikeda | Shuode Li | Simon J Mudd | Soroush Pour | Stan Putrya | Stanley Gunawan | Steven Hartland | Stripe Inc. | Tan Jinhua <312841925 at qq.com> | Tetsuro Aoki | Thomas Wodarek | Tim Ruffles | Tom Jenkinson | Vladimir Kovpak | Vladyslav Zhelezniak | Xiangyu Hu | Xiaobing Jiang | Xiuming Chen | Xuehong Chan | Zendesk Inc. | Zhang Xiang | Zhenye Xie | Zhixin Wen | Ziheng Lyu | copyright doctrines of fair use, fair dealing, or other | dyves labs AG | oscarzhao " core,github.com/go-viper/mapstructure/v2,MIT,Copyright (c) 2013 Mitchell Hashimoto +core,github.com/go-viper/mapstructure/v2/internal/errors,MIT,Copyright (c) 2013 Mitchell Hashimoto core,github.com/go-zookeeper/zk,BSD-3-Clause,"Copyright (c) 2013, Samuel Stauffer " core,github.com/gobwas/glob,MIT,Copyright (c) 2016 Sergey Kamardin core,github.com/gobwas/glob/compiler,MIT,Copyright (c) 2016 Sergey Kamardin @@ -1275,14 +1266,11 @@ core,github.com/hashicorp/consul/api,MPL-2.0,"Copyright © 2014-2018 HashiCorp, core,github.com/hashicorp/cronexpr,Apache-2.0,"Copyright © 2014-2018 HashiCorp, Inc" core,github.com/hashicorp/errwrap,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" core,github.com/hashicorp/go-cleanhttp,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" -core,github.com/hashicorp/go-getter,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" -core,github.com/hashicorp/go-getter/helper/url,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" core,github.com/hashicorp/go-hclog,MIT,"Copyright © 2014-2018 HashiCorp, Inc" core,github.com/hashicorp/go-immutable-radix,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" core,github.com/hashicorp/go-multierror,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" core,github.com/hashicorp/go-retryablehttp,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" core,github.com/hashicorp/go-rootcerts,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" -core,github.com/hashicorp/go-safetemp,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" core,github.com/hashicorp/go-secure-stdlib/parseutil,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" core,github.com/hashicorp/go-secure-stdlib/strutil,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" core,github.com/hashicorp/go-sockaddr,MPL-2.0,"Copyright © 2014-2018 HashiCorp, Inc" @@ -1470,7 +1458,6 @@ core,github.com/microsoft/go-rustaudit,MIT,Copyright (c) Microsoft Corporation core,github.com/miekg/dns,BSD-3-Clause,"Alex A. 
Skinner | Alex Sergeyev | Andrew Tunnell-Jones | Ask Bjørn Hansen | Copyright (c) 2009, The Go Authors. Extensions copyright (c) 2011, Miek Gieben | Copyright 2009 The Go Authors. All rights reserved. Use of this source code | Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is | Copyright 2014 CloudFlare. All rights reserved. Use of this source code is | Dave Cheney | Dusty Wilson | James Hartig | Marek Majkowski | Miek Gieben | Omri Bahumi | Peter van Dijk | copyright (c) 2011 Miek Gieben" core,github.com/mitchellh/copystructure,MIT,Copyright (c) 2014 Mitchell Hashimoto core,github.com/mitchellh/go-homedir,MIT,Copyright (c) 2013 Mitchell Hashimoto -core,github.com/mitchellh/go-testing-interface,MIT,Copyright (c) 2016 Mitchell Hashimoto core,github.com/mitchellh/hashstructure/v2,MIT,Copyright (c) 2016 Mitchell Hashimoto core,github.com/mitchellh/mapstructure,MIT,Copyright (c) 2013 Mitchell Hashimoto core,github.com/mitchellh/reflectwalk,MIT,Copyright (c) 2013 Mitchell Hashimoto @@ -2185,7 +2172,6 @@ core,github.com/twmb/franz-go/pkg/kversion,BSD-3-Clause,"Copyright 2020, Travis core,github.com/twmb/franz-go/pkg/sasl,BSD-3-Clause,"Copyright 2020, Travis Bischel" core,github.com/twmb/murmur3,BSD-3-Clause,"Copyright 2013, Sébastien Paolacci | Copyright 2018, Travis Bischel" core,github.com/ugorji/go/codec,MIT,Copyright (c) 2012-2020 Ugorji Nwoke -core,github.com/ulikunitz/xz,BSD-3-Clause,Copyright (c) 2014-2022 Ulrich Kunitz core,github.com/ulikunitz/xz/internal/hash,BSD-3-Clause,Copyright (c) 2014-2022 Ulrich Kunitz core,github.com/ulikunitz/xz/internal/xlog,BSD-3-Clause,Copyright (c) 2014-2022 Ulrich Kunitz core,github.com/ulikunitz/xz/lzma,BSD-3-Clause,Copyright (c) 2014-2022 Ulrich Kunitz @@ -2352,12 +2338,9 @@ core,go.opentelemetry.io/collector/exporter/exporterbatcher,Apache-2.0,Copyright core,go.opentelemetry.io/collector/exporter/exporterhelper,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/exporterqueue,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/exporter/internal/common,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/internal/experr,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/internal/otlptext,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/internal/queue,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/exporter/loggingexporter,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/exporter/loggingexporter/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/nopexporter,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/nopexporter/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/otlpexporter,Apache-2.0,Copyright The OpenTelemetry Authors @@ -2405,7 +2388,6 @@ core,go.opentelemetry.io/collector/pdata/pmetric,Apache-2.0,Copyright The OpenTe core,go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/pdata/ptrace,Apache-2.0,Copyright The OpenTelemetry Authors 
core,go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/pdata/testdata,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/processor,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/processor/batchprocessor,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/processor/batchprocessor/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors @@ -2453,6 +2435,7 @@ core,go.opentelemetry.io/contrib/config,Apache-2.0,Copyright The OpenTelemetry A core,go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/contrib/propagators/b3,Apache-2.0,Copyright The OpenTelemetry Authors @@ -2522,7 +2505,6 @@ core,go.opentelemetry.io/otel/semconv/v1.17.0,Apache-2.0,Copyright The OpenTelem core,go.opentelemetry.io/otel/semconv/v1.17.0/httpconv,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/otel/semconv/v1.20.0,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/otel/semconv/v1.21.0,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/otel/semconv/v1.24.0,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/otel/semconv/v1.25.0,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/otel/semconv/v1.26.0,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/otel/trace,Apache-2.0,Copyright The OpenTelemetry Authors @@ -2633,17 +2615,17 @@ core,golang.org/x/net/proxy,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/net/publicsuffix,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/net/trace,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/net/websocket,BSD-3-Clause,Copyright 2009 The Go Authors -core,golang.org/x/oauth2,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/authhandler,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/clientcredentials,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/google,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/google/externalaccount,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/google/internal/externalaccountauthorizeduser,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/google/internal/impersonate,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/google/internal/stsexchange,BSD-3-Clause,Copyright (c) 2009 The Go Authors. 
All rights reserved -core,golang.org/x/oauth2/internal,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/jws,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/jwt,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved +core,golang.org/x/oauth2,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/authhandler,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/clientcredentials,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/google,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/google/externalaccount,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/google/internal/externalaccountauthorizeduser,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/google/internal/impersonate,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/google/internal/stsexchange,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/internal,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/jws,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/jwt,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/sync/errgroup,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/sync/semaphore,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/sync/singleflight,BSD-3-Clause,Copyright 2009 The Go Authors diff --git a/cmd/agent/common/autodiscovery.go b/cmd/agent/common/autodiscovery.go index 6323d13ba3c03..8ff855aeca19f 100644 --- a/cmd/agent/common/autodiscovery.go +++ b/cmd/agent/common/autodiscovery.go @@ -20,8 +20,10 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" confad "github.com/DataDog/datadog-agent/pkg/config/autodiscovery" + pkgconfigenv "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/config/structure" "github.com/DataDog/datadog-agent/pkg/util/jsonquery" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -45,34 +47,34 @@ func setupAutoDiscovery(confSearchPaths []string, wmeta workloadmeta.Component, ac.AddConfigProvider( providers.NewFileConfigProvider(acTelemetryStore), - config.Datadog().GetBool("autoconf_config_files_poll"), - time.Duration(config.Datadog().GetInt("autoconf_config_files_poll_interval"))*time.Second, + pkgconfigsetup.Datadog().GetBool("autoconf_config_files_poll"), + time.Duration(pkgconfigsetup.Datadog().GetInt("autoconf_config_files_poll_interval"))*time.Second, ) // Autodiscovery cannot easily use config.RegisterOverrideFunc() due to Unmarshalling extraConfigProviders, extraConfigListeners := confad.DiscoverComponentsFromConfig() - var extraEnvProviders []config.ConfigurationProviders - var extraEnvListeners []config.Listeners - if config.IsAutoconfigEnabled() && !config.IsCLCRunner() { + var extraEnvProviders []pkgconfigsetup.ConfigurationProviders + var extraEnvListeners []pkgconfigsetup.Listeners + if pkgconfigenv.IsAutoconfigEnabled(pkgconfigsetup.Datadog()) && !pkgconfigsetup.IsCLCRunner(pkgconfigsetup.Datadog()) { extraEnvProviders, extraEnvListeners = confad.DiscoverComponentsFromEnv() } // Register additional configuration providers - var 
configProviders []config.ConfigurationProviders - var uniqueConfigProviders map[string]config.ConfigurationProviders - err := config.Datadog().UnmarshalKey("config_providers", &configProviders) + var configProviders []pkgconfigsetup.ConfigurationProviders + var uniqueConfigProviders map[string]pkgconfigsetup.ConfigurationProviders + err := structure.UnmarshalKey(pkgconfigsetup.Datadog(), "config_providers", &configProviders) if err == nil { - uniqueConfigProviders = make(map[string]config.ConfigurationProviders, len(configProviders)+len(extraEnvProviders)+len(configProviders)) + uniqueConfigProviders = make(map[string]pkgconfigsetup.ConfigurationProviders, len(configProviders)+len(extraEnvProviders)+len(configProviders)) for _, provider := range configProviders { uniqueConfigProviders[provider.Name] = provider } // Add extra config providers - for _, name := range config.Datadog().GetStringSlice("extra_config_providers") { + for _, name := range pkgconfigsetup.Datadog().GetStringSlice("extra_config_providers") { if _, found := uniqueConfigProviders[name]; !found { - uniqueConfigProviders[name] = config.ConfigurationProviders{Name: name, Polling: true} + uniqueConfigProviders[name] = pkgconfigsetup.ConfigurationProviders{Name: name, Polling: true} } else { log.Infof("Duplicate AD provider from extra_config_providers discarded as already present in config_providers: %s", name) } @@ -87,7 +89,7 @@ func setupAutoDiscovery(confSearchPaths []string, wmeta workloadmeta.Component, } if enableContainerProvider { - uniqueConfigProviders[names.KubeContainer] = config.ConfigurationProviders{Name: names.KubeContainer} + uniqueConfigProviders[names.KubeContainer] = pkgconfigsetup.ConfigurationProviders{Name: names.KubeContainer} } for _, provider := range extraConfigProviders { @@ -123,12 +125,12 @@ func setupAutoDiscovery(confSearchPaths []string, wmeta workloadmeta.Component, } } - var listeners []config.Listeners - err = config.Datadog().UnmarshalKey("listeners", &listeners) + var listeners []pkgconfigsetup.Listeners + err = structure.UnmarshalKey(pkgconfigsetup.Datadog(), "listeners", &listeners) if err == nil { // Add extra listeners - for _, name := range config.Datadog().GetStringSlice("extra_listeners") { - listeners = append(listeners, config.Listeners{Name: name}) + for _, name := range pkgconfigsetup.Datadog().GetStringSlice("extra_listeners") { + listeners = append(listeners, pkgconfigsetup.Listeners{Name: name}) } // The "docker" and "ecs" listeners were replaced with the diff --git a/cmd/agent/common/common.go b/cmd/agent/common/common.go index 4ac33147c8185..272fed027007f 100644 --- a/cmd/agent/common/common.go +++ b/cmd/agent/common/common.go @@ -15,9 +15,9 @@ import ( "github.com/DataDog/datadog-agent/cmd/agent/common/path" "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/settings" settingshttp "github.com/DataDog/datadog-agent/pkg/config/settings/http" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -28,8 +28,8 @@ func GetPythonPaths() []string { return []string{ path.GetDistPath(), // common modules are shipped in the dist path directly or under the "checks/" sub-dir path.PyChecksPath, // integrations-core legacy checks - filepath.Join(path.GetDistPath(), "checks.d"), // custom checks in the "checks.d/" sub-dir of the dist path - config.Datadog().GetString("additional_checksd"), // custom checks, least precedent check 
location + filepath.Join(path.GetDistPath(), "checks.d"), // custom checks in the "checks.d/" sub-dir of the dist path + pkgconfigsetup.Datadog().GetString("additional_checksd"), // custom checks, least precedent check location } } @@ -43,10 +43,10 @@ func GetVersion(w http.ResponseWriter, _ *http.Request) { // NewSettingsClient returns a configured runtime settings client. func NewSettingsClient() (settings.Client, error) { - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } hc := util.GetClient(false) - return settingshttp.NewClient(hc, fmt.Sprintf("https://%v:%v/agent/config", ipcAddress, config.Datadog().GetInt("cmd_port")), "agent", settingshttp.NewHTTPClientOptions(util.LeaveConnectionOpen)), nil + return settingshttp.NewClient(hc, fmt.Sprintf("https://%v:%v/agent/config", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")), "agent", settingshttp.NewHTTPClientOptions(util.LeaveConnectionOpen)), nil } diff --git a/cmd/agent/common/common_windows.go b/cmd/agent/common/common_windows.go index 96b481944f63c..9bcf4cf48b75a 100644 --- a/cmd/agent/common/common_windows.go +++ b/cmd/agent/common/common_windows.go @@ -10,7 +10,7 @@ import ( "path/filepath" "github.com/DataDog/datadog-agent/cmd/agent/common/path" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/winutil" "github.com/DataDog/datadog-agent/pkg/util/winutil/messagestrings" @@ -48,11 +48,11 @@ func CheckAndUpgradeConfig() error { log.Debug("Previous config file not found, not upgrading") return nil } - config.Datadog().AddConfigPath(path.DefaultConfPath) - _, err := config.LoadWithoutSecret() + pkgconfigsetup.Datadog().AddConfigPath(path.DefaultConfPath) + _, err := pkgconfigsetup.LoadWithoutSecret(pkgconfigsetup.Datadog(), nil) if err == nil { // was able to read config, check for api key - if config.Datadog().GetString("api_key") != "" { + if pkgconfigsetup.Datadog().GetString("api_key") != "" { log.Debug("Datadog.yaml found, and API key present. Not upgrading config") return nil } diff --git a/cmd/agent/common/helpers.go b/cmd/agent/common/helpers.go index 023fb29c384ee..b8738de623365 100644 --- a/cmd/agent/common/helpers.go +++ b/cmd/agent/common/helpers.go @@ -7,13 +7,12 @@ package common import ( "github.com/DataDog/datadog-agent/comp/core/settings" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" ) // SetupInternalProfiling is a common helper to configure runtime settings for internal profiling. 
-func SetupInternalProfiling(settings settings.Component, cfg config.Reader, configPrefix string) { +func SetupInternalProfiling(settings settings.Component, cfg model.Reader, configPrefix string) { if v := cfg.GetInt(configPrefix + "internal_profiling.block_profile_rate"); v > 0 { if err := settings.SetRuntimeSetting("runtime_block_profile_rate", v, model.SourceAgentRuntime); err != nil { log.Errorf("Error setting block profile rate: %v", err) diff --git a/cmd/agent/common/import.go b/cmd/agent/common/import.go index 019c10789d260..6d64c0b8bad75 100644 --- a/cmd/agent/common/import.go +++ b/cmd/agent/common/import.go @@ -18,8 +18,8 @@ import ( "github.com/fatih/color" yaml "gopkg.in/yaml.v2" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/legacy" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // TransformationFunc type represents transformation applicable to byte slices @@ -30,7 +30,7 @@ func ImportConfig(oldConfigDir string, newConfigDir string, force bool) error { datadogConfPath := filepath.Join(oldConfigDir, "datadog.conf") datadogYamlPath := filepath.Join(newConfigDir, "datadog.yaml") traceAgentConfPath := filepath.Join(newConfigDir, "trace-agent.conf") - configConverter := config.NewConfigConverter() + configConverter := legacy.NewConfigConverter() const cfgExt = ".yaml" const dirExt = ".d" @@ -52,14 +52,14 @@ func ImportConfig(oldConfigDir string, newConfigDir string, force bool) error { } // setup the configuration system - config.Datadog().AddConfigPath(newConfigDir) - _, err = config.LoadWithoutSecret() + pkgconfigsetup.Datadog().AddConfigPath(newConfigDir) + _, err = pkgconfigsetup.LoadWithoutSecret(pkgconfigsetup.Datadog(), nil) if err != nil { return fmt.Errorf("unable to load Datadog config file: %s", err) } // we won't overwrite the conf file if it contains a valid api_key - if config.Datadog().GetString("api_key") != "" && !force { + if pkgconfigsetup.Datadog().GetString("api_key") != "" && !force { return fmt.Errorf("%s seems to contain a valid configuration, run the command again with --force or -f to overwrite it", datadogYamlPath) } @@ -136,7 +136,7 @@ func ImportConfig(oldConfigDir string, newConfigDir string, force bool) error { } // marshal the config object to YAML - b, err := yaml.Marshal(config.Datadog().AllSettings()) + b, err := yaml.Marshal(pkgconfigsetup.Datadog().AllSettings()) if err != nil { return fmt.Errorf("unable to marshal config to YAML: %v", err) } diff --git a/cmd/agent/common/misconfig/mounts.go b/cmd/agent/common/misconfig/mounts.go index 4fe51cf3ea17f..e9d4d8153583b 100644 --- a/cmd/agent/common/misconfig/mounts.go +++ b/cmd/agent/common/misconfig/mounts.go @@ -18,8 +18,8 @@ import ( "github.com/pkg/errors" "github.com/syndtr/gocapability/capability" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -68,7 +68,7 @@ func procMount() error { if !haveEgid { groups = append(groups, egid) } - path := config.Datadog().GetString("container_proc_root") + path := pkgconfigsetup.Datadog().GetString("container_proc_root") if env.IsContainerized() && path != "/proc" { path = filepath.Join(path, "1/mounts") } else { diff --git a/cmd/agent/common/test_helpers.go b/cmd/agent/common/test_helpers.go index 2daaf02efde50..aca2815135a43 100644 --- a/cmd/agent/common/test_helpers.go +++ b/cmd/agent/common/test_helpers.go @@ -16,13 
+16,14 @@ import ( "github.com/DataDog/datadog-agent/cmd/agent/common/path" "github.com/DataDog/datadog-agent/comp/core/secrets" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/optional" ) // SetupConfigForTest fires up the configuration system and returns warnings if any. -func SetupConfigForTest(confFilePath string) (*config.Warnings, error) { - cfg := config.Datadog() +func SetupConfigForTest(confFilePath string) (*model.Warnings, error) { + cfg := pkgconfigsetup.Datadog() origin := "datadog.yaml" // set the paths where a config file is expected if len(confFilePath) != 0 { @@ -36,7 +37,7 @@ func SetupConfigForTest(confFilePath string) (*config.Warnings, error) { } cfg.AddConfigPath(path.DefaultConfPath) // load the configuration - warnings, err := config.LoadDatadogCustom(cfg, origin, optional.NewNoneOption[secrets.Component](), nil) + warnings, err := pkgconfigsetup.LoadDatadogCustom(cfg, origin, optional.NewNoneOption[secrets.Component](), nil) if err != nil { // special-case permission-denied with a clearer error message if errors.Is(err, fs.ErrPermission) { diff --git a/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example b/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example index 233f284d2576d..89299ee1c00f3 100644 --- a/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example +++ b/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example @@ -5,11 +5,11 @@ init_config: # # min_collection_interval: 60 - ## @param timeout - integer - optional - default: 10000 - ## Specifies how much time the full traceroute should take - ## in milliseconds + ## @param timeout - integer - optional - default: 1000 + ## Specifies how much time in milliseconds the traceroute should + ## wait for a response from each hop before timing out. # - # timeout: 10000 + # timeout: 1000 # Network Path integration is used to monitor individual endpoints. # Supported platforms are Linux and Windows. macOS is not supported yet. @@ -21,6 +21,7 @@ instances: ## @param port - integer - optional ## Port of the target endpoint to monitor via Network Path. + ## For UDP, we do not recommend setting the port since it can make probes less reliable. ## If port is not set, a random port will be used. # # port: @@ -36,11 +37,11 @@ instances: # # max_ttl: - ## @param timeout - integer - optional - default: 10000 - ## Specifies how much time the full traceroute should take - ## in milliseconds + ## @param timeout - integer - optional - default: 1000 + ## Specifies how much time in milliseconds the traceroute should + ## wait for a response from each hop before timing out. # - # timeout: 10000 + # timeout: 1000 ## @param min_collection_interval - number - optional - default: 60 ## Specifies how frequently we should probe the endpoint. 
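To make the revised `timeout` semantics concrete, a minimal Network Path instance using the parameters documented in the example file above might look like the sketch below. Only `port`, `timeout`, `max_ttl`, and `min_collection_interval` are taken from the example file; the `hostname` key and all concrete values are assumptions for illustration.

```yaml
# Hypothetical network_path.d/conf.yaml instance; the "hostname" key and the
# endpoint value are illustrative, the remaining keys come from the example
# file shown above.
instances:
  - hostname: api.example.com     # target endpoint to monitor (illustrative)
    port: 443                     # optional; for UDP probes, leaving it unset is recommended
    timeout: 1000                 # per-hop response wait, in milliseconds (default 1000)
    max_ttl: 30                   # illustrative hop limit
    min_collection_interval: 60   # probe the endpoint every 60 seconds (default 60)
```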
diff --git a/cmd/agent/dist/conf.d/service_discovery.d/conf.yaml.default b/cmd/agent/dist/conf.d/service_discovery.d/conf.yaml.default new file mode 100644 index 0000000000000..00d9a2dbba2c8 --- /dev/null +++ b/cmd/agent/dist/conf.d/service_discovery.d/conf.yaml.default @@ -0,0 +1,2 @@ +instances: + - {} diff --git a/cmd/agent/subcommands/diagnose/command.go b/cmd/agent/subcommands/diagnose/command.go index 9710f8298329f..46f9ccea0e730 100644 --- a/cmd/agent/subcommands/diagnose/command.go +++ b/cmd/agent/subcommands/diagnose/command.go @@ -30,7 +30,7 @@ import ( workloadmetafx "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx" "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/diagnose" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -331,7 +331,7 @@ func printPayload(name payloadName, _ log.Component, config config.Component) er } c := util.GetClient(false) - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } diff --git a/cmd/agent/subcommands/dogstatsd/command.go b/cmd/agent/subcommands/dogstatsd/command.go index 80a1a955d6a1f..f81a6519624c2 100644 --- a/cmd/agent/subcommands/dogstatsd/command.go +++ b/cmd/agent/subcommands/dogstatsd/command.go @@ -25,7 +25,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -81,7 +81,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { func triggerDump(config cconfig.Component) (string, error) { c := util.GetClient(false) - addr, err := pkgconfig.GetIPCAddress() + addr, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return "", err } diff --git a/cmd/agent/subcommands/dogstatsdcapture/command.go b/cmd/agent/subcommands/dogstatsdcapture/command.go index 25e0ba2f75201..d0f6afeaa20a6 100644 --- a/cmd/agent/subcommands/dogstatsdcapture/command.go +++ b/cmd/agent/subcommands/dogstatsdcapture/command.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/pkg/api/security" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -102,7 +102,7 @@ func dogstatsdCapture(_ log.Component, config config.Component, cliParams *cliPa conn, err := grpc.DialContext( //nolint:staticcheck // TODO (ASC) fix grpc.DialContext is deprecated ctx, - fmt.Sprintf(":%v", pkgconfig.Datadog().GetInt("cmd_port")), + fmt.Sprintf(":%v", pkgconfigsetup.Datadog().GetInt("cmd_port")), grpc.WithTransportCredentials(creds), ) if err != nil { diff --git a/cmd/agent/subcommands/dogstatsdreplay/command.go b/cmd/agent/subcommands/dogstatsdreplay/command.go index 3186fe144f8e8..de85ffb6517f7 100644 --- 
a/cmd/agent/subcommands/dogstatsdreplay/command.go +++ b/cmd/agent/subcommands/dogstatsdreplay/command.go @@ -26,7 +26,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/impl" "github.com/DataDog/datadog-agent/pkg/api/security" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -113,7 +113,7 @@ func dogstatsdReplay(_ log.Component, config config.Component, cliParams *cliPar apiconn, err := grpc.DialContext( //nolint:staticcheck // TODO (ASC) fix grpc.DialContext is deprecated ctx, - fmt.Sprintf(":%v", pkgconfig.Datadog().GetInt("cmd_port")), + fmt.Sprintf(":%v", pkgconfigsetup.Datadog().GetInt("cmd_port")), grpc.WithTransportCredentials(creds), ) if err != nil { @@ -133,7 +133,7 @@ func dogstatsdReplay(_ log.Component, config config.Component, cliParams *cliPar return err } - s := pkgconfig.Datadog().GetString("dogstatsd_socket") + s := pkgconfigsetup.Datadog().GetString("dogstatsd_socket") if s == "" { return fmt.Errorf("Dogstatsd UNIX socket disabled") } @@ -150,7 +150,7 @@ func dogstatsdReplay(_ log.Component, config config.Component, cliParams *cliPar defer syscall.Close(sk) err = syscall.SetsockoptInt(sk, syscall.SOL_SOCKET, syscall.SO_SNDBUF, - pkgconfig.Datadog().GetInt("dogstatsd_buffer_size")) + pkgconfigsetup.Datadog().GetInt("dogstatsd_buffer_size")) if err != nil { return err } diff --git a/cmd/agent/subcommands/dogstatsdstats/command.go b/cmd/agent/subcommands/dogstatsdstats/command.go index c8c12ce29f9da..aba4c10809db6 100644 --- a/cmd/agent/subcommands/dogstatsdstats/command.go +++ b/cmd/agent/subcommands/dogstatsdstats/command.go @@ -21,7 +21,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug/serverdebugimpl" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/input" @@ -71,11 +71,11 @@ func requestDogstatsdStats(_ log.Component, config config.Component, cliParams * var e error var s string c := util.GetClient(false) // FIX: get certificates right then make this true - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } - urlstr := fmt.Sprintf("https://%v:%v/agent/dogstatsd-stats", ipcAddress, pkgconfig.Datadog().GetInt("cmd_port")) + urlstr := fmt.Sprintf("https://%v:%v/agent/dogstatsd-stats", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")) // Set session token e = util.SetAuthToken(config) diff --git a/cmd/agent/subcommands/flare/command.go b/cmd/agent/subcommands/flare/command.go index d990b2aabe11c..c39613e753945 100644 --- a/cmd/agent/subcommands/flare/command.go +++ b/cmd/agent/subcommands/flare/command.go @@ -49,8 +49,8 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/resources/resourcesimpl" "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/settings" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -168,7 +168,7 @@ func readProfileData(seconds int) (flare.ProfileData, error) { type pprofGetter func(path string) ([]byte, error) tcpGet := func(portConfig string) pprofGetter { - pprofURL := fmt.Sprintf("http://127.0.0.1:%d/debug/pprof", pkgconfig.Datadog().GetInt(portConfig)) + pprofURL := fmt.Sprintf("http://127.0.0.1:%d/debug/pprof", pkgconfigsetup.Datadog().GetInt(portConfig)) return func(path string) ([]byte, error) { return util.DoGet(c, pprofURL+path, util.LeaveConnectionOpen) } @@ -224,15 +224,15 @@ func readProfileData(seconds int) (flare.ProfileData, error) { "security-agent": serviceProfileCollector(tcpGet("security_agent.expvar_port"), seconds), } - if pkgconfig.Datadog().GetBool("process_config.enabled") || - pkgconfig.Datadog().GetBool("process_config.container_collection.enabled") || - pkgconfig.Datadog().GetBool("process_config.process_collection.enabled") { + if pkgconfigsetup.Datadog().GetBool("process_config.enabled") || + pkgconfigsetup.Datadog().GetBool("process_config.container_collection.enabled") || + pkgconfigsetup.Datadog().GetBool("process_config.process_collection.enabled") { agentCollectors["process"] = serviceProfileCollector(tcpGet("process_config.expvar_port"), seconds) } - if pkgconfig.Datadog().GetBool("apm_config.enabled") { - traceCpusec := pkgconfig.Datadog().GetInt("apm_config.receiver_timeout") + if pkgconfigsetup.Datadog().GetBool("apm_config.enabled") { + traceCpusec := pkgconfigsetup.Datadog().GetInt("apm_config.receiver_timeout") if traceCpusec > seconds { // do not exceed requested duration traceCpusec = seconds @@ -244,8 +244,8 @@ func readProfileData(seconds int) (flare.ProfileData, error) { agentCollectors["trace"] = serviceProfileCollector(tcpGet("apm_config.debug.port"), traceCpusec) } - if pkgconfig.SystemProbe().GetBool("system_probe_config.enabled") { - probeUtil, probeUtilErr := net.GetRemoteSystemProbeUtil(pkgconfig.SystemProbe().GetString("system_probe_config.sysprobe_socket")) + if pkgconfigsetup.SystemProbe().GetBool("system_probe_config.enabled") { + probeUtil, probeUtilErr := net.GetRemoteSystemProbeUtil(pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")) if !errors.Is(probeUtilErr, net.ErrNotImplemented) { sysProbeGet := func() pprofGetter { @@ -386,16 +386,16 @@ func makeFlare(flareComp flare.Component, func requestArchive(flareComp flare.Component, pdata flare.ProfileData) (string, error) { fmt.Fprintln(color.Output, color.BlueString("Asking the agent to build the flare archive.")) c := util.GetClient(false) // FIX: get certificates right then make this true - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { fmt.Fprintln(color.Output, color.RedString(fmt.Sprintf("Error getting IPC address for the agent: %s", err))) return createArchive(flareComp, pdata, err) } - urlstr := fmt.Sprintf("https://%v:%v/agent/flare", ipcAddress, pkgconfig.Datadog().GetInt("cmd_port")) + urlstr := fmt.Sprintf("https://%v:%v/agent/flare", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")) // Set session token - if err = util.SetAuthToken(pkgconfig.Datadog()); err != nil { + if err = util.SetAuthToken(pkgconfigsetup.Datadog()); err != nil { fmt.Fprintln(color.Output, color.RedString(fmt.Sprintf("Error: %s", err))) return 
createArchive(flareComp, pdata, err) } diff --git a/cmd/agent/subcommands/flare/command_other_test.go b/cmd/agent/subcommands/flare/command_other_test.go new file mode 100644 index 0000000000000..6730af25dcece --- /dev/null +++ b/cmd/agent/subcommands/flare/command_other_test.go @@ -0,0 +1,36 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build !windows + +// Package flare implements 'agent flare'. +package flare + +import ( + "net/http" + "net/http/httptest" + + "github.com/stretchr/testify/require" + + "github.com/DataDog/datadog-agent/pkg/config/model" +) + +// NewSystemProbeTestServer starts a new mock server to handle System Probe requests. +func NewSystemProbeTestServer(_ http.Handler) (*httptest.Server, error) { + // Linux still uses a port-based system-probe, it does not need a dedicated system probe server + // for the tests. + return nil, nil +} + +// InjectConnectionFailures injects a failure in TestReadProfileDataErrors. +func InjectConnectionFailures(_ model.Config, _ model.Config) { +} + +// CheckExpectedConnectionFailures checks the expected errors after simulated +// connection failures. +func CheckExpectedConnectionFailures(c *commandTestSuite, err error) { + // System probe by default is disabled and no connection is attempted for it in the test. + require.Regexp(c.T(), "^4 errors occurred:\n", err.Error()) +} diff --git a/cmd/agent/subcommands/flare/command_test.go b/cmd/agent/subcommands/flare/command_test.go index 5dbeeb2c68376..b555c751f58b5 100644 --- a/cmd/agent/subcommands/flare/command_test.go +++ b/cmd/agent/subcommands/flare/command_test.go @@ -31,24 +31,38 @@ type commandTestSuite struct { sysprobeSocketPath string tcpServer *httptest.Server unixServer *httptest.Server + systemProbeServer *httptest.Server } func (c *commandTestSuite) SetupSuite() { t := c.T() c.sysprobeSocketPath = path.Join(t.TempDir(), "sysprobe.sock") - c.tcpServer, c.unixServer = c.getPprofTestServer() } -func (c *commandTestSuite) TearDownSuite() { - c.tcpServer.Close() - if c.unixServer != nil { - c.unixServer.Close() - } +// startTestServers starts test servers from a clean state to ensure no cache responses are used. +// This should be called by each test that requires them. 
+func (c *commandTestSuite) startTestServers() { + t := c.T() + c.tcpServer, c.unixServer, c.systemProbeServer = c.getPprofTestServer() + + t.Cleanup(func() { + if c.tcpServer != nil { + c.tcpServer.Close() + c.tcpServer = nil + } + if c.unixServer != nil { + c.unixServer.Close() + c.unixServer = nil + } + if c.systemProbeServer != nil { + c.systemProbeServer.Close() + c.systemProbeServer = nil + } + }) } -func (c *commandTestSuite) getPprofTestServer() (tcpServer *httptest.Server, unixServer *httptest.Server) { - t := c.T() - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { +func newMockHandler() http.HandlerFunc { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/debug/pprof/heap": w.Write([]byte("heap_profile")) @@ -67,17 +81,28 @@ func (c *commandTestSuite) getPprofTestServer() (tcpServer *httptest.Server, uni w.WriteHeader(500) } }) +} +func (c *commandTestSuite) getPprofTestServer() (tcpServer *httptest.Server, unixServer *httptest.Server, sysProbeServer *httptest.Server) { + var err error + t := c.T() + + handler := newMockHandler() tcpServer = httptest.NewServer(handler) if runtime.GOOS == "linux" { unixServer = httptest.NewUnstartedServer(handler) - var err error unixServer.Listener, err = net.Listen("unix", c.sysprobeSocketPath) require.NoError(t, err, "could not create listener for unix socket on %s", c.sysprobeSocketPath) unixServer.Start() } - return tcpServer, unixServer + sysProbeServer, err = NewSystemProbeTestServer(handler) + require.NoError(c.T(), err, "could not restart system probe server") + if sysProbeServer != nil { + sysProbeServer.Start() + } + + return tcpServer, unixServer, sysProbeServer } func TestCommandTestSuite(t *testing.T) { @@ -86,6 +111,8 @@ func TestCommandTestSuite(t *testing.T) { func (c *commandTestSuite) TestReadProfileData() { t := c.T() + c.startTestServers() + u, err := url.Parse(c.tcpServer.URL) require.NoError(t, err) port := u.Port() @@ -154,6 +181,8 @@ func (c *commandTestSuite) TestReadProfileData() { func (c *commandTestSuite) TestReadProfileDataNoTraceAgent() { t := c.T() + c.startTestServers() + u, err := url.Parse(c.tcpServer.URL) require.NoError(t, err) port := u.Port() @@ -217,6 +246,8 @@ func (c *commandTestSuite) TestReadProfileDataNoTraceAgent() { func (c *commandTestSuite) TestReadProfileDataErrors() { t := c.T() + c.startTestServers() + mockConfig := configmock.New(t) // setting Core Agent Expvar port to 0 to ensure failing on fetch (using the default value can lead to // successful request when running next to an Agent) @@ -226,9 +257,13 @@ func (c *commandTestSuite) TestReadProfileDataErrors() { mockConfig.SetWithoutSource("process_config.enabled", true) mockConfig.SetWithoutSource("process_config.expvar_port", 0) + mockSysProbeConfig := configmock.NewSystemProbe(t) + InjectConnectionFailures(mockSysProbeConfig, mockConfig) + data, err := readProfileData(10) + require.Error(t, err) - require.Regexp(t, "^4 errors occurred:\n", err.Error()) + CheckExpectedConnectionFailures(c, err) require.Len(t, data, 0) } diff --git a/cmd/agent/subcommands/flare/command_windows_test.go b/cmd/agent/subcommands/flare/command_windows_test.go new file mode 100644 index 0000000000000..9780e66474033 --- /dev/null +++ b/cmd/agent/subcommands/flare/command_windows_test.go @@ -0,0 +1,71 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build windows + +// Package flare implements 'agent flare'. +package flare + +import ( + "net/http" + "net/http/httptest" + + "github.com/stretchr/testify/require" + + "github.com/DataDog/datadog-agent/pkg/config/model" + processNet "github.com/DataDog/datadog-agent/pkg/process/net" +) + +const ( + // SystemProbeTestPipeName is the test named pipe for system-probe + systemProbeTestPipeName = `\\.\pipe\dd_system_probe_flare_test` + + // systemProbeTestPipeSecurityDescriptor has a DACL that allows Everyone access for these tests. + systemProbeTestPipeSecurityDescriptor = "D:PAI(A;;FA;;;WD)" +) + +// NewSystemProbeTestServer starts a new mock server to handle System Probe requests. +func NewSystemProbeTestServer(handler http.Handler) (*httptest.Server, error) { + server := httptest.NewUnstartedServer(handler) + + // Override the named pipe path for tests to avoid conflicts with the locally installed Datadog agent. + processNet.OverrideSystemProbeNamedPipeConfig( + systemProbeTestPipeName, + systemProbeTestPipeSecurityDescriptor) + + conn, err := processNet.NewSystemProbeListener("") + if err != nil { + return nil, err + } + + server.Listener = conn.GetListener() + return server, nil +} + +// InjectConnectionFailures injects a failure in TestReadProfileDataErrors. +func InjectConnectionFailures(mockSysProbeConfig model.Config, mockConfig model.Config) { + // Explicitly enabled system probe to exercise connections to it. + mockSysProbeConfig.SetWithoutSource("system_probe_config.enabled", true) + + // Exercise a connection failure for a Windows system probe named pipe client by + // making them use a bad path. + // The system probe http server must be setup before this override. + processNet.OverrideSystemProbeNamedPipeConfig( + `\\.\pipe\dd_system_probe_test_bad`, + systemProbeTestPipeSecurityDescriptor) + + // The security-agent connection is expected to fail too in this test, but + // by enabling system probe, a port will be provided to it (security agent). + // Here we make sure the security agent port is a bad one. + mockConfig.SetWithoutSource("security_agent.expvar_port", 0) +} + +// CheckExpectedConnectionFailures checks the expected errors after simulated +// connection failures. +func CheckExpectedConnectionFailures(c *commandTestSuite, err error) { + // In Windows, this test explicitly simulates a system probe connection failure. + // We expect the standard socket errors (4) and a named pipe failure for system probe. 
+ require.Regexp(c.T(), "^5 errors occurred:\n", err.Error()) +} diff --git a/cmd/agent/subcommands/integrations/command.go b/cmd/agent/subcommands/integrations/command.go index 867f85bfa945b..c47de16dca4da 100644 --- a/cmd/agent/subcommands/integrations/command.go +++ b/cmd/agent/subcommands/integrations/command.go @@ -27,7 +27,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/executable" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -556,7 +556,7 @@ func downloadWheel(cliParams *cliParams, integration, version, rootLayoutType st downloaderCmd.Env = environ // Proxy support - proxies := pkgconfig.Datadog().GetProxies() + proxies := pkgconfigsetup.Datadog().GetProxies() if proxies != nil { downloaderCmd.Env = append(downloaderCmd.Env, fmt.Sprintf("HTTP_PROXY=%s", proxies.HTTP), @@ -798,7 +798,7 @@ func getVersionFromReqLine(integration string, lines string) (*semver.Version, b } func moveConfigurationFilesOf(cliParams *cliParams, integration string) error { - confFolder := pkgconfig.Datadog().GetString("confd_path") + confFolder := pkgconfigsetup.Datadog().GetString("confd_path") check := getIntegrationName(integration) confFileDest := filepath.Join(confFolder, fmt.Sprintf("%s.d", check)) if err := os.MkdirAll(confFileDest, os.ModeDir|0755); err != nil { diff --git a/cmd/agent/subcommands/remoteconfig/command.go b/cmd/agent/subcommands/remoteconfig/command.go index b01422eddc686..1e5e8b5b45062 100644 --- a/cmd/agent/subcommands/remoteconfig/command.go +++ b/cmd/agent/subcommands/remoteconfig/command.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/pkg/api/security" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/flare" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -57,7 +57,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { } func state(_ *cliParams, config config.Component) error { - if !pkgconfig.IsRemoteConfigEnabled(config) { + if !pkgconfigsetup.IsRemoteConfigEnabled(config) { return errors.New("remote configuration is not enabled") } fmt.Println("Fetching the configuration and director repos state..") @@ -75,12 +75,12 @@ func state(_ *cliParams, config config.Component) error { } ctx = metadata.NewOutgoingContext(ctx, md) - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } - cli, err := agentgrpc.GetDDAgentSecureClient(ctx, ipcAddress, pkgconfig.GetIPCPort()) + cli, err := agentgrpc.GetDDAgentSecureClient(ctx, ipcAddress, pkgconfigsetup.GetIPCPort()) if err != nil { return err } @@ -92,7 +92,7 @@ func state(_ *cliParams, config config.Component) error { } var stateHA *pbgo.GetStateConfigResponse - if pkgconfig.Datadog().GetBool("multi_region_failover.enabled") { + if pkgconfigsetup.Datadog().GetBool("multi_region_failover.enabled") { stateHA, err = cli.GetConfigStateHA(ctx, in) if err != nil { return fmt.Errorf("couldn't get the HA repositories state: %w", err) diff --git 
a/cmd/agent/subcommands/run/command.go b/cmd/agent/subcommands/run/command.go index 6ff4f17804df5..45da16dba5426 100644 --- a/cmd/agent/subcommands/run/command.go +++ b/cmd/agent/subcommands/run/command.go @@ -132,9 +132,9 @@ import ( profileStatus "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/status" "github.com/DataDog/datadog-agent/pkg/collector/python" "github.com/DataDog/datadog-agent/pkg/commonchecks" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/remote/data" commonsettings "github.com/DataDog/datadog-agent/pkg/config/settings" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/jmxfetch" "github.com/DataDog/datadog-agent/pkg/serializer" clusteragentStatus "github.com/DataDog/datadog-agent/pkg/status/clusteragent" @@ -376,7 +376,7 @@ func getSharedFxOption() fx.Option { apiimpl.Module(), commonendpoints.Module(), compressionimpl.Module(), - demultiplexerimpl.Module(), + demultiplexerimpl.Module(demultiplexerimpl.NewDefaultParams(demultiplexerimpl.WithDogstatsdNoAggregationPipelineConfig())), demultiplexerendpointfx.Module(), dogstatsd.Bundle(dogstatsdServer.Params{Serverless: false}), fx.Provide(func(logsagent optional.Option[logsAgent.Component]) optional.Option[logsagentpipeline.Component] { @@ -408,7 +408,7 @@ func getSharedFxOption() fx.Option { lc.Append(fx.Hook{ OnStart: func(_ context.Context) error { // setup the AutoConfig instance - common.LoadComponents(secretResolver, wmeta, ac, pkgconfig.Datadog().GetString("confd_path")) + common.LoadComponents(secretResolver, wmeta, ac, pkgconfigsetup.Datadog().GetString("confd_path")) return nil }, }) @@ -416,15 +416,7 @@ func getSharedFxOption() fx.Option { logs.Bundle(), langDetectionClimpl.Module(), metadata.Bundle(), - // injecting the aggregator demultiplexer to FX until we migrate it to a proper component. This allows - // other already migrated components to request it. 
- fx.Provide(func(config config.Component) demultiplexerimpl.Params { - params := demultiplexerimpl.NewDefaultParams() - params.EnableNoAggregationPipeline = config.GetBool("dogstatsd_no_aggregation_pipeline") - return params - }), - orchestratorForwarderImpl.Module(), - fx.Supply(orchestratorForwarderImpl.NewDefaultParams()), + orchestratorForwarderImpl.Module(orchestratorForwarderImpl.NewDefaultParams()), eventplatformimpl.Module(eventplatformimpl.NewDefaultParams()), eventplatformreceiverimpl.Module(), @@ -511,17 +503,17 @@ func startAgent( log.Infof("Starting Datadog Agent v%v", version.AgentVersion) } - if err := util.SetupCoreDump(pkgconfig.Datadog()); err != nil { + if err := util.SetupCoreDump(pkgconfigsetup.Datadog()); err != nil { log.Warnf("Can't setup core dumps: %v, core dumps might not be available after a crash", err) } - if v := pkgconfig.Datadog().GetBool("internal_profiling.capture_all_allocations"); v { + if v := pkgconfigsetup.Datadog().GetBool("internal_profiling.capture_all_allocations"); v { runtime.MemProfileRate = 1 log.Infof("MemProfileRate set to 1, capturing every single memory allocation!") } // Setup Internal Profiling - common.SetupInternalProfiling(settings, pkgconfig.Datadog(), "") + common.SetupInternalProfiling(settings, pkgconfigsetup.Datadog(), "") // Setup expvar server telemetryHandler := telemetry.Handler() @@ -537,14 +529,14 @@ func startAgent( log.Infof("Hostname is: %s", hostnameDetected) // start remote configuration management - if pkgconfig.IsRemoteConfigEnabled(pkgconfig.Datadog()) { + if pkgconfigsetup.IsRemoteConfigEnabled(pkgconfigsetup.Datadog()) { // Subscribe to `AGENT_TASK` product rcclient.SubscribeAgentTask() // Subscribe to `APM_TRACING` product rcclient.SubscribeApmTracing() - if pkgconfig.Datadog().GetBool("remote_configuration.agent_integrations.enabled") { + if pkgconfigsetup.Datadog().GetBool("remote_configuration.agent_integrations.enabled") { // Spin up the config provider to schedule integrations through remote-config rcProvider := providers.NewRemoteConfigProvider() rcclient.Subscribe(data.ProductAgentIntegrations, rcProvider.IntegrationScheduleCallback) @@ -555,7 +547,7 @@ func startAgent( // start clc runner server // only start when the cluster agent is enabled and a cluster check runner host is enabled - if pkgconfig.Datadog().GetBool("cluster_agent.enabled") && pkgconfig.Datadog().GetBool("clc_runner_enabled") { + if pkgconfigsetup.Datadog().GetBool("cluster_agent.enabled") && pkgconfigsetup.Datadog().GetBool("clc_runner_enabled") { if err = clcrunnerapi.StartCLCRunnerServer(map[string]http.Handler{ "/telemetry": telemetryHandler, }, ac); err != nil { @@ -564,7 +556,7 @@ func startAgent( } // Create the Leader election engine without initializing it - if pkgconfig.Datadog().GetBool("leader_election") { + if pkgconfigsetup.Datadog().GetBool("leader_election") { leaderelection.CreateGlobalLeaderEngine(ctx) } diff --git a/cmd/agent/subcommands/run/dependent_services_nix.go b/cmd/agent/subcommands/run/dependent_services_nix.go index 862bf36ccede9..ee2abf597945f 100644 --- a/cmd/agent/subcommands/run/dependent_services_nix.go +++ b/cmd/agent/subcommands/run/dependent_services_nix.go @@ -6,12 +6,12 @@ package run -import "github.com/DataDog/datadog-agent/pkg/config" +import "github.com/DataDog/datadog-agent/pkg/config/model" // Servicedef defines a service type Servicedef struct { name string - configKeys map[string]config.Config + configKeys map[string]model.Config } var subservices []Servicedef diff --git 
a/cmd/agent/subcommands/run/dependent_services_windows.go b/cmd/agent/subcommands/run/dependent_services_windows.go index ee607bbc0a741..c1b95c2fa4f44 100644 --- a/cmd/agent/subcommands/run/dependent_services_windows.go +++ b/cmd/agent/subcommands/run/dependent_services_windows.go @@ -13,7 +13,8 @@ import ( "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc/mgr" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -22,7 +23,7 @@ type serviceInitFunc func() (err error) // Servicedef defines a service type Servicedef struct { name string - configKeys map[string]config.Config + configKeys map[string]model.Config serviceName string serviceInit serviceInitFunc @@ -31,40 +32,40 @@ type Servicedef struct { var subservices = []Servicedef{ { name: "apm", - configKeys: map[string]config.Config{ - "apm_config.enabled": config.Datadog(), + configKeys: map[string]model.Config{ + "apm_config.enabled": pkgconfigsetup.Datadog(), }, serviceName: "datadog-trace-agent", serviceInit: apmInit, }, { name: "process", - configKeys: map[string]config.Config{ - "process_config.enabled": config.Datadog(), - "process_config.process_collection.enabled": config.Datadog(), - "process_config.container_collection.enabled": config.Datadog(), - "process_config.process_discovery.enabled": config.Datadog(), - "network_config.enabled": config.SystemProbe(), - "system_probe_config.enabled": config.SystemProbe(), + configKeys: map[string]model.Config{ + "process_config.enabled": pkgconfigsetup.Datadog(), + "process_config.process_collection.enabled": pkgconfigsetup.Datadog(), + "process_config.container_collection.enabled": pkgconfigsetup.Datadog(), + "process_config.process_discovery.enabled": pkgconfigsetup.Datadog(), + "network_config.enabled": pkgconfigsetup.SystemProbe(), + "system_probe_config.enabled": pkgconfigsetup.SystemProbe(), }, serviceName: "datadog-process-agent", serviceInit: processInit, }, { name: "sysprobe", - configKeys: map[string]config.Config{ - "network_config.enabled": config.SystemProbe(), - "system_probe_config.enabled": config.SystemProbe(), - "windows_crash_detection.enabled": config.SystemProbe(), - "runtime_security_config.enabled": config.SystemProbe(), + configKeys: map[string]model.Config{ + "network_config.enabled": pkgconfigsetup.SystemProbe(), + "system_probe_config.enabled": pkgconfigsetup.SystemProbe(), + "windows_crash_detection.enabled": pkgconfigsetup.SystemProbe(), + "runtime_security_config.enabled": pkgconfigsetup.SystemProbe(), }, serviceName: "datadog-system-probe", serviceInit: sysprobeInit, }, { name: "cws", - configKeys: map[string]config.Config{ - "runtime_security_config.enabled": config.SystemProbe(), + configKeys: map[string]model.Config{ + "runtime_security_config.enabled": pkgconfigsetup.SystemProbe(), }, serviceName: "datadog-security-agent", serviceInit: securityInit, diff --git a/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_listener.go b/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_listener.go index 428dce7116f02..193eb6bcdd152 100644 --- a/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_listener.go +++ b/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_listener.go @@ -14,14 +14,14 @@ import ( "net" "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" ) // getCLCRunnerListener returns a listening connection for the cluster level check runner server // The server must only listen on the cluster check runner pod ip // The cluster check runner Agent won't start if the server host is not configured func getCLCRunnerListener() (net.Listener, error) { - podIP := config.Datadog().GetString("clc_runner_host") + podIP := pkgconfigsetup.Datadog().GetString("clc_runner_host") // This is not a security feature // util.IsForbidden only helps to avoid unnecessarily permissive server config if util.IsForbidden(podIP) { @@ -32,5 +32,5 @@ func getCLCRunnerListener() (net.Listener, error) { // IPv6 addresses must be formatted [ip]:port podIP = fmt.Sprintf("[%s]", podIP) } - return net.Listen("tcp", fmt.Sprintf("%v:%v", podIP, config.Datadog().GetInt("clc_runner_port"))) + return net.Listen("tcp", fmt.Sprintf("%v:%v", podIP, pkgconfigsetup.Datadog().GetInt("clc_runner_port"))) } diff --git a/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_server.go b/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_server.go index 542cc88d42e5a..9b763d3a8b1df 100644 --- a/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_server.go +++ b/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_server.go @@ -26,7 +26,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery" "github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) var clcListener net.Listener @@ -56,12 +57,12 @@ func StartCLCRunnerServer(extraHandlers map[string]http.Handler, ac autodiscover // CLC Runner token // Use the Cluster Agent token - err = util.InitDCAAuthToken(config.Datadog()) + err = util.InitDCAAuthToken(pkgconfigsetup.Datadog()) if err != nil { return err } - hosts := []string{"127.0.0.1", "localhost", config.Datadog().GetString("clc_runner_host")} + hosts := []string{"127.0.0.1", "localhost", pkgconfigsetup.Datadog().GetString("clc_runner_host")} _, rootCertPEM, rootKey, err := security.GenerateRootCert(hosts, 2048) if err != nil { return fmt.Errorf("unable to start TLS server: %v", err) @@ -84,14 +85,14 @@ func StartCLCRunnerServer(extraHandlers map[string]http.Handler, ac autodiscover } // Use a stack depth of 4 on top of the default one to get a relevant filename in the stdlib - logWriter, _ := config.NewLogWriter(4, seelog.WarnLvl) + logWriter, _ := pkglogsetup.NewLogWriter(4, seelog.WarnLvl) srv := &http.Server{ Handler: r, ErrorLog: stdLog.New(logWriter, "Error from the clc runner http API server: ", 0), // log errors to seelog, TLSConfig: &tlsConfig, - WriteTimeout: config.Datadog().GetDuration("clc_runner_server_write_timeout") * time.Second, - ReadHeaderTimeout: config.Datadog().GetDuration("clc_runner_server_readheader_timeout") * time.Second, + WriteTimeout: pkgconfigsetup.Datadog().GetDuration("clc_runner_server_write_timeout") * time.Second, + ReadHeaderTimeout: pkgconfigsetup.Datadog().GetDuration("clc_runner_server_readheader_timeout") * time.Second, } tlsListener := tls.NewListener(clcListener, &tlsConfig) diff --git a/cmd/agent/subcommands/snmp/command.go b/cmd/agent/subcommands/snmp/command.go index b19402aec82f0..4aa75df6821fb 100644 --- a/cmd/agent/subcommands/snmp/command.go +++ b/cmd/agent/subcommands/snmp/command.go @@ -17,6 +17,8 @@ import ( 
"strings" "time" + "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" + "github.com/gosnmp/gosnmp" "github.com/spf13/cobra" "go.uber.org/fx" @@ -184,20 +186,17 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { err := fxutil.OneShot(scanDevice, fx.Supply(connParams, globalParams, cmd), fx.Provide(func() argsType { return args }), + compressionimpl.Module(), fx.Supply(core.BundleParams{ ConfigParams: config.NewAgentParams(globalParams.ConfFilePath, config.WithExtraConfFiles(globalParams.ExtraConfFilePath), config.WithFleetPoliciesDirPath(globalParams.FleetPoliciesDirPath)), SecretParams: secrets.NewEnabledParams(), LogParams: log.ForOneShot(command.LoggerName, "off", true)}), core.Bundle(), - aggregator.Bundle(), + aggregator.Bundle(demultiplexerimpl.NewDefaultParams()), forwarder.Bundle(defaultforwarder.NewParams()), eventplatformimpl.Module(eventplatformimpl.NewDefaultParams()), eventplatformreceiverimpl.Module(), - orchestratorimpl.Module(), - fx.Provide( - orchestratorimpl.NewDefaultParams, - demultiplexerimpl.NewDefaultParams, - ), + orchestratorimpl.Module(orchestratorimpl.NewDefaultParams()), ) if err != nil { var ue configErr diff --git a/cmd/agent/subcommands/stop/command.go b/cmd/agent/subcommands/stop/command.go index f10ccbf51eadc..868b9baf5a212 100644 --- a/cmd/agent/subcommands/stop/command.go +++ b/cmd/agent/subcommands/stop/command.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -59,7 +59,7 @@ func stop(config config.Component, _ *cliParams, _ log.Component) error { if e != nil { return e } - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } diff --git a/cmd/agent/subcommands/streamep/command.go b/cmd/agent/subcommands/streamep/command.go index c6dbb1dc57a62..9c98b90c4e8cd 100644 --- a/cmd/agent/subcommands/streamep/command.go +++ b/cmd/agent/subcommands/streamep/command.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/diagnostic" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -56,7 +56,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { //nolint:revive // TODO(CINT) Fix revive linter func streamEventPlatform(_ log.Component, config config.Component, cliParams *cliParams) error { - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } @@ -78,7 +78,7 @@ func streamRequest(url string, body []byte, onChunk func([]byte)) error { c := util.GetClient(false) // Set session token - e = util.SetAuthToken(pkgconfig.Datadog()) + e = util.SetAuthToken(pkgconfigsetup.Datadog()) if e != nil { return e } diff --git a/cmd/agent/subcommands/streamlogs/command.go b/cmd/agent/subcommands/streamlogs/command.go index 57e66416e0f01..2088563f5e9da 100644 --- a/cmd/agent/subcommands/streamlogs/command.go +++ 
b/cmd/agent/subcommands/streamlogs/command.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/diagnostic" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -84,7 +84,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { //nolint:revive // TODO(AML) Fix revive linter func streamLogs(_ log.Component, config config.Component, cliParams *CliParams) error { - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } @@ -139,7 +139,7 @@ func streamRequest(url string, body []byte, duration time.Duration, onChunk func c.Timeout = duration } // Set session token - e = util.SetAuthToken(pkgconfig.Datadog()) + e = util.SetAuthToken(pkgconfigsetup.Datadog()) if e != nil { return e } diff --git a/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go b/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go index b78a1c9223f7f..4a691db8a2b03 100644 --- a/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go +++ b/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go @@ -55,7 +55,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/clusteragent" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks" pkgcollector "github.com/DataDog/datadog-agent/pkg/collector" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/cloudfoundry" @@ -85,12 +85,11 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { core.Bundle(), forwarder.Bundle(defaultforwarder.NewParams(defaultforwarder.WithResolvers())), compressionimpl.Module(), - demultiplexerimpl.Module(), - orchestratorForwarderImpl.Module(), - fx.Supply(orchestratorForwarderImpl.NewDisabledParams()), + demultiplexerimpl.Module(demultiplexerimpl.NewDefaultParams()), + orchestratorForwarderImpl.Module(orchestratorForwarderImpl.NewDisabledParams()), eventplatformimpl.Module(eventplatformimpl.NewDisabledParams()), eventplatformreceiverimpl.Module(), - fx.Supply(demultiplexerimpl.NewDefaultParams()), + // setup workloadmeta wmcatalog.GetCatalog(), workloadmetafx.Module(workloadmeta.Params{ @@ -144,7 +143,7 @@ func run( mainCtx, mainCtxCancel := context.WithCancel(context.Background()) defer mainCtxCancel() // Calling cancel twice is safe - if !pkgconfig.Datadog().IsSet("api_key") { + if !pkgconfigsetup.Datadog().IsSet("api_key") { pkglog.Critical("no API key configured, exiting") return nil } @@ -174,7 +173,7 @@ func run( return err } - common.LoadComponents(secretResolver, wmeta, ac, pkgconfig.Datadog().GetString("confd_path")) + common.LoadComponents(secretResolver, wmeta, ac, pkgconfigsetup.Datadog().GetString("confd_path")) // Set up check collector ac.AddScheduler("check", pkgcollector.InitCheckScheduler(optional.NewOption(collector), demultiplexer, logReceiver), true) @@ -217,19 +216,19 @@ func run( } func initializeCCCache(ctx context.Context) error { - pollInterval := time.Second * time.Duration(pkgconfig.Datadog().GetInt("cloud_foundry_cc.poll_interval")) + pollInterval := 
time.Second * time.Duration(pkgconfigsetup.Datadog().GetInt("cloud_foundry_cc.poll_interval")) _, err := cloudfoundry.ConfigureGlobalCCCache( ctx, - pkgconfig.Datadog().GetString("cloud_foundry_cc.url"), - pkgconfig.Datadog().GetString("cloud_foundry_cc.client_id"), - pkgconfig.Datadog().GetString("cloud_foundry_cc.client_secret"), - pkgconfig.Datadog().GetBool("cloud_foundry_cc.skip_ssl_validation"), + pkgconfigsetup.Datadog().GetString("cloud_foundry_cc.url"), + pkgconfigsetup.Datadog().GetString("cloud_foundry_cc.client_id"), + pkgconfigsetup.Datadog().GetString("cloud_foundry_cc.client_secret"), + pkgconfigsetup.Datadog().GetBool("cloud_foundry_cc.skip_ssl_validation"), pollInterval, - pkgconfig.Datadog().GetInt("cloud_foundry_cc.apps_batch_size"), - pkgconfig.Datadog().GetBool("cluster_agent.refresh_on_cache_miss"), - pkgconfig.Datadog().GetBool("cluster_agent.serve_nozzle_data"), - pkgconfig.Datadog().GetBool("cluster_agent.sidecars_tags"), - pkgconfig.Datadog().GetBool("cluster_agent.isolation_segments_tags"), + pkgconfigsetup.Datadog().GetInt("cloud_foundry_cc.apps_batch_size"), + pkgconfigsetup.Datadog().GetBool("cluster_agent.refresh_on_cache_miss"), + pkgconfigsetup.Datadog().GetBool("cluster_agent.serve_nozzle_data"), + pkgconfigsetup.Datadog().GetBool("cluster_agent.sidecars_tags"), + pkgconfigsetup.Datadog().GetBool("cluster_agent.isolation_segments_tags"), nil, ) if err != nil { @@ -239,11 +238,11 @@ func initializeCCCache(ctx context.Context) error { } func initializeBBSCache(ctx context.Context) error { - pollInterval := time.Second * time.Duration(pkgconfig.Datadog().GetInt("cloud_foundry_bbs.poll_interval")) + pollInterval := time.Second * time.Duration(pkgconfigsetup.Datadog().GetInt("cloud_foundry_bbs.poll_interval")) // NOTE: we can't use GetPollInterval in ConfigureGlobalBBSCache, as that causes import cycle - includeListString := pkgconfig.Datadog().GetStringSlice("cloud_foundry_bbs.env_include") - excludeListString := pkgconfig.Datadog().GetStringSlice("cloud_foundry_bbs.env_exclude") + includeListString := pkgconfigsetup.Datadog().GetStringSlice("cloud_foundry_bbs.env_include") + excludeListString := pkgconfigsetup.Datadog().GetStringSlice("cloud_foundry_bbs.env_exclude") includeList := make([]*regexp.Regexp, len(includeListString)) excludeList := make([]*regexp.Regexp, len(excludeListString)) @@ -266,10 +265,10 @@ func initializeBBSCache(ctx context.Context) error { bc, err := cloudfoundry.ConfigureGlobalBBSCache( ctx, - pkgconfig.Datadog().GetString("cloud_foundry_bbs.url"), - pkgconfig.Datadog().GetString("cloud_foundry_bbs.ca_file"), - pkgconfig.Datadog().GetString("cloud_foundry_bbs.cert_file"), - pkgconfig.Datadog().GetString("cloud_foundry_bbs.key_file"), + pkgconfigsetup.Datadog().GetString("cloud_foundry_bbs.url"), + pkgconfigsetup.Datadog().GetString("cloud_foundry_bbs.ca_file"), + pkgconfigsetup.Datadog().GetString("cloud_foundry_bbs.cert_file"), + pkgconfigsetup.Datadog().GetString("cloud_foundry_bbs.key_file"), pollInterval, includeList, excludeList, diff --git a/cmd/cluster-agent/admission/server.go b/cmd/cluster-agent/admission/server.go index f98cc0fef91fa..bff5012e1c4c1 100644 --- a/cmd/cluster-agent/admission/server.go +++ b/cmd/cluster-agent/admission/server.go @@ -20,12 +20,14 @@ import ( authenticationv1 "k8s.io/api/authentication/v1" + "github.com/cihub/seelog" + "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/metrics" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/certificate" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/cihub/seelog" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" admiv1 "k8s.io/api/admission/v1" admiv1beta1 "k8s.io/api/admission/v1beta1" @@ -101,19 +103,19 @@ func (s *Server) Register(uri string, webhookName string, f WebhookFunc, dc dyna // Run starts the kubernetes admission webhook server. func (s *Server) Run(mainCtx context.Context, client kubernetes.Interface) error { var tlsMinVersion uint16 = tls.VersionTLS13 - if config.Datadog().GetBool("cluster_agent.allow_legacy_tls") { + if pkgconfigsetup.Datadog().GetBool("cluster_agent.allow_legacy_tls") { tlsMinVersion = tls.VersionTLS10 } - logWriter, _ := config.NewTLSHandshakeErrorWriter(4, seelog.WarnLvl) + logWriter, _ := pkglogsetup.NewTLSHandshakeErrorWriter(4, seelog.WarnLvl) server := &http.Server{ - Addr: fmt.Sprintf(":%d", config.Datadog().GetInt("admission_controller.port")), + Addr: fmt.Sprintf(":%d", pkgconfigsetup.Datadog().GetInt("admission_controller.port")), Handler: s.mux, ErrorLog: stdLog.New(logWriter, "Error from the admission controller http API server: ", 0), TLSConfig: &tls.Config{ GetCertificate: func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { secretNs := common.GetResourcesNamespace() - secretName := config.Datadog().GetString("admission_controller.certificate.secret_name") + secretName := pkgconfigsetup.Datadog().GetString("admission_controller.certificate.secret_name") cert, err := certificate.GetCertificateFromSecret(secretNs, secretName, client) if err != nil { log.Errorf("Couldn't fetch certificate: %v", err) diff --git a/cmd/cluster-agent/api/agent/agent.go b/cmd/cluster-agent/api/agent/agent.go index bc82056e2ee45..86a536d77cb20 100644 --- a/cmd/cluster-agent/api/agent/agent.go +++ b/cmd/cluster-agent/api/agent/agent.go @@ -22,7 +22,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/status" "github.com/DataDog/datadog-agent/comp/core/tagger" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/flare" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -144,7 +144,7 @@ func makeFlare(w http.ResponseWriter, r *http.Request, statusComponent status.Co } } - logFile := config.Datadog().GetString("log_file") + logFile := pkgconfigsetup.Datadog().GetString("log_file") if logFile == "" { logFile = path.DefaultDCALogFile } diff --git a/cmd/cluster-agent/api/listener.go b/cmd/cluster-agent/api/listener.go index fb9886e0f5b44..0e713778be79f 100644 --- a/cmd/cluster-agent/api/listener.go +++ b/cmd/cluster-agent/api/listener.go @@ -14,10 +14,10 @@ import ( "fmt" "net" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // getListener returns a listening connection func getListener() (net.Listener, error) { - return net.Listen("tcp", fmt.Sprintf("0.0.0.0:%v", config.Datadog().GetInt("cluster_agent.cmd_port"))) + return net.Listen("tcp", fmt.Sprintf("0.0.0.0:%v", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port"))) } diff --git a/cmd/cluster-agent/api/server.go b/cmd/cluster-agent/api/server.go index 779e795e53cf6..0b6ba72b63f08 
100644 --- a/cmd/cluster-agent/api/server.go +++ b/cmd/cluster-agent/api/server.go @@ -41,9 +41,10 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) var ( @@ -79,10 +80,10 @@ func StartServer(ctx context.Context, w workloadmeta.Component, taggerComp tagge return fmt.Errorf("unable to create the api server: %v", err) } // Internal token - util.CreateAndSetAuthToken(pkgconfig.Datadog()) //nolint:errcheck + util.CreateAndSetAuthToken(pkgconfigsetup.Datadog()) //nolint:errcheck // DCA client token - util.InitDCAAuthToken(pkgconfig.Datadog()) //nolint:errcheck + util.InitDCAAuthToken(pkgconfigsetup.Datadog()) //nolint:errcheck // create cert hosts := []string{"127.0.0.1", "localhost"} @@ -107,12 +108,12 @@ func StartServer(ctx context.Context, w workloadmeta.Component, taggerComp tagge MinVersion: tls.VersionTLS13, } - if pkgconfig.Datadog().GetBool("cluster_agent.allow_legacy_tls") { + if pkgconfigsetup.Datadog().GetBool("cluster_agent.allow_legacy_tls") { tlsConfig.MinVersion = tls.VersionTLS10 } // Use a stack depth of 4 on top of the default one to get a relevant filename in the stdlib - logWriter, _ := pkgconfig.NewTLSHandshakeErrorWriter(4, seelog.WarnLvl) + logWriter, _ := pkglogsetup.NewTLSHandshakeErrorWriter(4, seelog.WarnLvl) authInterceptor := grpcutil.AuthInterceptor(func(token string) (interface{}, error) { if token != util.GetDCAAuthToken() { @@ -132,7 +133,7 @@ func StartServer(ctx context.Context, w workloadmeta.Component, taggerComp tagge taggerServer: taggerserver.NewServer(taggerComp), }) - timeout := pkgconfig.Datadog().GetDuration("cluster_agent.server.idle_timeout_seconds") * time.Second + timeout := pkgconfigsetup.Datadog().GetDuration("cluster_agent.server.idle_timeout_seconds") * time.Second srv := grpcutil.NewMuxedGRPCServer( listener.Addr().String(), tlsConfig, diff --git a/cmd/cluster-agent/api/server_test.go b/cmd/cluster-agent/api/server_test.go index e40c38ff141da..b83cc5201164c 100644 --- a/cmd/cluster-agent/api/server_test.go +++ b/cmd/cluster-agent/api/server_test.go @@ -15,14 +15,14 @@ import ( "github.com/stretchr/testify/require" "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestValidateTokenMiddleware(t *testing.T) { mockConfig := configmock.New(t) mockConfig.SetWithoutSource("cluster_agent.auth_token", "abc123") - util.InitDCAAuthToken(config.Datadog()) + util.InitDCAAuthToken(pkgconfigsetup.Datadog()) tests := []struct { path, authToken string diff --git a/cmd/cluster-agent/api/v1/cloudfoundry_metadata.go b/cmd/cluster-agent/api/v1/cloudfoundry_metadata.go index 94e60ff1cb3a0..0dd0420cfe889 100644 --- a/cmd/cluster-agent/api/v1/cloudfoundry_metadata.go +++ b/cmd/cluster-agent/api/v1/cloudfoundry_metadata.go @@ -15,7 +15,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/clusteragent/api" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/cloudfoundry" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -23,7 +23,7 @@ import ( func installCloudFoundryMetadataEndpoints(r *mux.Router) { r.HandleFunc("/tags/cf/apps/{nodeName}", api.WithTelemetryWrapper("getCFAppsMetadataForNode", getCFAppsMetadataForNode)).Methods("GET") - if config.Datadog().GetBool("cluster_agent.serve_nozzle_data") { + if pkgconfigsetup.Datadog().GetBool("cluster_agent.serve_nozzle_data") { r.HandleFunc("/cf/apps/{guid}", api.WithTelemetryWrapper("getCFApplication", getCFApplication)).Methods("GET") r.HandleFunc("/cf/apps", api.WithTelemetryWrapper("getCFApplications", getCFApplications)).Methods("GET") r.HandleFunc("/cf/org_quotas", api.WithTelemetryWrapper("getCFOrgQuotas", getCFOrgQuotas)).Methods("GET") diff --git a/cmd/cluster-agent/api/v1/clusterchecks.go b/cmd/cluster-agent/api/v1/clusterchecks.go index 441ae2230ab17..47204a5fb7563 100644 --- a/cmd/cluster-agent/api/v1/clusterchecks.go +++ b/cmd/cluster-agent/api/v1/clusterchecks.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/clusteragent" "github.com/DataDog/datadog-agent/pkg/clusteragent/api" cctypes "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" dcautil "github.com/DataDog/datadog-agent/pkg/util/clusteragent" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -193,7 +193,7 @@ func validateClientIP(addr string) (string, error) { return "", fmt.Errorf("cannot parse CLC runner address: %s", addr) } - if addr == "" && config.Datadog().GetBool("cluster_checks.advanced_dispatching_enabled") { + if addr == "" && pkgconfigsetup.Datadog().GetBool("cluster_checks.advanced_dispatching_enabled") { log.Warn("Cluster check dispatching error: cannot get runner IP from http headers. 
advanced_dispatching_enabled requires agent 6.17 or above.") } diff --git a/cmd/cluster-agent/api/v1/install.go b/cmd/cluster-agent/api/v1/install.go index a3388e3c2008d..ef49a8538eac6 100644 --- a/cmd/cluster-agent/api/v1/install.go +++ b/cmd/cluster-agent/api/v1/install.go @@ -12,7 +12,7 @@ import ( "github.com/gorilla/mux" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/clusteragent" @@ -21,7 +21,7 @@ import ( // InstallMetadataEndpoints registers endpoints for metadata func InstallMetadataEndpoints(r *mux.Router, w workloadmeta.Component) { log.Debug("Registering metadata endpoints") - if config.Datadog().GetBool("cloud_foundry") { + if pkgconfigsetup.Datadog().GetBool("cloud_foundry") { installCloudFoundryMetadataEndpoints(r) } else { installKubernetesMetadataEndpoints(r, w) diff --git a/cmd/cluster-agent/api/v1/kubernetes_metadata.go b/cmd/cluster-agent/api/v1/kubernetes_metadata.go index 53a63322caa24..2a3e046e4f522 100644 --- a/cmd/cluster-agent/api/v1/kubernetes_metadata.go +++ b/cmd/cluster-agent/api/v1/kubernetes_metadata.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/clusteragent/api" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" as "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" apicommon "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/controllers" @@ -109,7 +109,7 @@ func getNodeLabels(w http.ResponseWriter, r *http.Request, wmeta workloadmeta.Co } func getNodeAnnotations(w http.ResponseWriter, r *http.Request, wmeta workloadmeta.Component) { - getNodeMetadata(w, r, wmeta, func(km *workloadmeta.KubernetesMetadata) map[string]string { return km.Annotations }, "annotations", config.Datadog().GetStringSlice("kubernetes_node_annotations_as_host_aliases")) + getNodeMetadata(w, r, wmeta, func(km *workloadmeta.KubernetesMetadata) map[string]string { return km.Annotations }, "annotations", pkgconfigsetup.Datadog().GetStringSlice("kubernetes_node_annotations_as_host_aliases")) } // getNamespaceMetadataWithTransformerFunc is used when the node agent hits the DCA for some (or all) metadata of a specific namespace diff --git a/cmd/cluster-agent/custommetrics/server.go b/cmd/cluster-agent/custommetrics/server.go index bc7f2f9091d6c..d16ca95bb87c2 100644 --- a/cmd/cluster-agent/custommetrics/server.go +++ b/cmd/cluster-agent/custommetrics/server.go @@ -20,7 +20,7 @@ import ( datadogclient "github.com/DataDog/datadog-agent/comp/autoscaling/datadogclient/def" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/custommetrics" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/externalmetrics" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" as "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -55,7 +55,7 @@ func RunServer(ctx context.Context, apiCl *as.APIClient, datadogCl optional.Opti cmd.FlagSet = 
pflag.NewFlagSet(cmd.Name, pflag.ExitOnError) var c []string - for k, v := range config.Datadog().GetStringMapString(metricsServerConf) { + for k, v := range pkgconfigsetup.Datadog().GetStringMapString(metricsServerConf) { c = append(c, fmt.Sprintf("--%s=%s", k, v)) } @@ -95,7 +95,7 @@ func (a *DatadogMetricsAdapter) makeProviderOrDie(ctx context.Context, apiCl *as return nil, err } - if config.Datadog().GetBool("external_metrics_provider.use_datadogmetric_crd") { + if pkgconfigsetup.Datadog().GetBool("external_metrics_provider.use_datadogmetric_crd") { if dc, ok := datadogCl.Get(); ok { return externalmetrics.NewDatadogMetricProvider(ctx, apiCl, dc) } @@ -122,9 +122,9 @@ func (a *DatadogMetricsAdapter) Config() (*apiserver.Config, error) { if !a.FlagSet.Lookup("secure-port").Changed { // Ensure backward compatibility. 443 by default, but will error out if incorrectly set. // refer to apiserver code in k8s.io/apiserver/pkg/server/option/serving.go - a.SecureServing.BindPort = config.Datadog().GetInt("external_metrics_provider.port") + a.SecureServing.BindPort = pkgconfigsetup.Datadog().GetInt("external_metrics_provider.port") // Default in External Metrics is TLS 1.2 - if !config.Datadog().GetBool("cluster_agent.allow_legacy_tls") { + if !pkgconfigsetup.Datadog().GetBool("cluster_agent.allow_legacy_tls") { a.SecureServing.MinTLSVersion = tlsVersion13Str } } diff --git a/cmd/cluster-agent/subcommands/check/command.go b/cmd/cluster-agent/subcommands/check/command.go index 77cc9135c9b7f..cceb56a853ab4 100644 --- a/cmd/cluster-agent/subcommands/check/command.go +++ b/cmd/cluster-agent/subcommands/check/command.go @@ -11,7 +11,7 @@ package check import ( "github.com/DataDog/datadog-agent/cmd/cluster-agent/command" "github.com/DataDog/datadog-agent/pkg/cli/subcommands/check" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pkgcommon "github.com/DataDog/datadog-agent/pkg/util/common" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection" @@ -22,7 +22,7 @@ import ( func Commands(globalParams *command.GlobalParams) []*cobra.Command { ctx, _ := pkgcommon.GetMainCtxCancel() // Create the Leader election engine without initializing it - if pkgconfig.Datadog().GetBool("leader_election") { + if pkgconfigsetup.Datadog().GetBool("leader_election") { leaderelection.CreateGlobalLeaderEngine(ctx) } diff --git a/cmd/cluster-agent/subcommands/config/command.go b/cmd/cluster-agent/subcommands/config/command.go index 1d01668fb5f28..24a6f44487990 100644 --- a/cmd/cluster-agent/subcommands/config/command.go +++ b/cmd/cluster-agent/subcommands/config/command.go @@ -14,9 +14,9 @@ import ( "github.com/DataDog/datadog-agent/cmd/cluster-agent/command" "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/cli/subcommands/config" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/settings" settingshttp "github.com/DataDog/datadog-agent/pkg/config/settings/http" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/spf13/cobra" ) @@ -40,7 +40,7 @@ func newSettingsClient() (settings.Client, error) { apiConfigURL := fmt.Sprintf( "https://localhost:%v/config", - pkgconfig.Datadog().GetInt("cluster_agent.cmd_port"), + pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port"), ) return settingshttp.NewClient(c, apiConfigURL, "datadog-cluster-agent", settingshttp.NewHTTPClientOptions(util.LeaveConnectionOpen)), nil 
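Note on the recurring pattern in the hunks above — an illustrative Go sketch, not part of the patch: these changes swap the package-level accessor from pkg/config for the one exposed by pkg/config/setup, leaving the call sites otherwise unchanged. The helper name clusterAgentConfigURL below is hypothetical; pkgconfigsetup.Datadog(), GetInt, and the cluster_agent.cmd_port key are taken directly from the hunks above.

package example

import (
	"fmt"

	pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
)

// clusterAgentConfigURL is a hypothetical helper mirroring the config subcommand hunk above:
// it reads the cluster-agent command port through the pkg/config/setup accessor (previously
// pkgconfig.Datadog() from pkg/config) and builds the local config endpoint URL.
func clusterAgentConfigURL() string {
	port := pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")
	return fmt.Sprintf("https://localhost:%v/config", port)
}
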
diff --git a/cmd/cluster-agent/subcommands/metamap/command.go b/cmd/cluster-agent/subcommands/metamap/command.go index f238dfeaf8a22..a5aa1281b6486 100644 --- a/cmd/cluster-agent/subcommands/metamap/command.go +++ b/cmd/cluster-agent/subcommands/metamap/command.go @@ -20,7 +20,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/status/render" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -71,13 +71,13 @@ func getMetadataMap(nodeName string) error { c := util.GetClient(false) // FIX: get certificates right then make this true var urlstr string if nodeName == "" { - urlstr = fmt.Sprintf("https://localhost:%v/api/v1/tags/pod", pkgconfig.Datadog().GetInt("cluster_agent.cmd_port")) + urlstr = fmt.Sprintf("https://localhost:%v/api/v1/tags/pod", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) } else { - urlstr = fmt.Sprintf("https://localhost:%v/api/v1/tags/pod/%s", pkgconfig.Datadog().GetInt("cluster_agent.cmd_port"), nodeName) + urlstr = fmt.Sprintf("https://localhost:%v/api/v1/tags/pod/%s", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port"), nodeName) } // Set session token - e = util.SetAuthToken(pkgconfig.Datadog()) + e = util.SetAuthToken(pkgconfigsetup.Datadog()) if e != nil { return e } diff --git a/cmd/cluster-agent/subcommands/start/command.go b/cmd/cluster-agent/subcommands/start/command.go index abc7704e601bc..0b99a2c35125b 100644 --- a/cmd/cluster-agent/subcommands/start/command.go +++ b/cmd/cluster-agent/subcommands/start/command.go @@ -73,9 +73,9 @@ import ( clusteragentMetricsStatus "github.com/DataDog/datadog-agent/pkg/clusteragent/metricsstatus" orchestratorStatus "github.com/DataDog/datadog-agent/pkg/clusteragent/orchestrator" pkgcollector "github.com/DataDog/datadog-agent/pkg/collector" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" rcclient "github.com/DataDog/datadog-agent/pkg/config/remote/client" commonsettings "github.com/DataDog/datadog-agent/pkg/config/settings" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" hostnameStatus "github.com/DataDog/datadog-agent/pkg/status/clusteragent/hostname" endpointsStatus "github.com/DataDog/datadog-agent/pkg/status/endpoints" "github.com/DataDog/datadog-agent/pkg/status/health" @@ -136,12 +136,10 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { core.Bundle(), forwarder.Bundle(defaultforwarder.NewParams(defaultforwarder.WithResolvers(), defaultforwarder.WithDisableAPIKeyChecking())), compressionimpl.Module(), - demultiplexerimpl.Module(), - orchestratorForwarderImpl.Module(), - fx.Supply(orchestratorForwarderImpl.NewDefaultParams()), + demultiplexerimpl.Module(demultiplexerimpl.NewDefaultParams()), + orchestratorForwarderImpl.Module(orchestratorForwarderImpl.NewDefaultParams()), eventplatformimpl.Module(eventplatformimpl.NewDisabledParams()), eventplatformreceiverimpl.Module(), - fx.Supply(demultiplexerimpl.NewDefaultParams()), // setup workloadmeta wmcatalog.GetCatalog(), workloadmetafx.Module(workloadmeta.Params{ @@ -339,7 +337,7 @@ func start(log log.Component, // Initialize and start remote configuration client var rcClient *rcclient.Client rcserv, isSet := rcService.Get() - if pkgconfig.IsRemoteConfigEnabled(config) && isSet { + if 
pkgconfigsetup.IsRemoteConfigEnabled(config) && isSet { var products []string if config.GetBool("admission_controller.auto_instrumentation.patcher.enabled") { products = append(products, state.ProductAPMTracing) diff --git a/cmd/cluster-agent/subcommands/start/compliance.go b/cmd/cluster-agent/subcommands/start/compliance.go index 157d6217913fe..780e390331cee 100644 --- a/cmd/cluster-agent/subcommands/start/compliance.go +++ b/cmd/cluster-agent/subcommands/start/compliance.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/logs/agent/config" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/compliance" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/client" logshttp "github.com/DataDog/datadog-agent/pkg/logs/client/http" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -44,12 +44,12 @@ func runCompliance(ctx context.Context, senderManager sender.SenderManager, wmet } func newLogContext(logsConfig *config.LogsConfigKeys, endpointPrefix string) (*config.Endpoints, *client.DestinationsContext, error) { - endpoints, err := config.BuildHTTPEndpointsWithConfig(coreconfig.Datadog(), logsConfig, endpointPrefix, intakeTrackType, config.AgentJSONIntakeProtocol, config.DefaultIntakeOrigin) + endpoints, err := config.BuildHTTPEndpointsWithConfig(pkgconfigsetup.Datadog(), logsConfig, endpointPrefix, intakeTrackType, config.AgentJSONIntakeProtocol, config.DefaultIntakeOrigin) if err != nil { - endpoints, err = config.BuildHTTPEndpoints(coreconfig.Datadog(), intakeTrackType, config.AgentJSONIntakeProtocol, config.DefaultIntakeOrigin) + endpoints, err = config.BuildHTTPEndpoints(pkgconfigsetup.Datadog(), intakeTrackType, config.AgentJSONIntakeProtocol, config.DefaultIntakeOrigin) if err == nil { - httpConnectivity := logshttp.CheckConnectivity(endpoints.Main, coreconfig.Datadog()) - endpoints, err = config.BuildEndpoints(coreconfig.Datadog(), httpConnectivity, intakeTrackType, config.AgentJSONIntakeProtocol, config.DefaultIntakeOrigin) + httpConnectivity := logshttp.CheckConnectivity(endpoints.Main, pkgconfigsetup.Datadog()) + endpoints, err = config.BuildEndpoints(pkgconfigsetup.Datadog(), httpConnectivity, intakeTrackType, config.AgentJSONIntakeProtocol, config.DefaultIntakeOrigin) } } @@ -68,7 +68,7 @@ func newLogContext(logsConfig *config.LogsConfigKeys, endpointPrefix string) (*c } func newLogContextCompliance() (*config.Endpoints, *client.DestinationsContext, error) { - logsConfigComplianceKeys := config.NewLogsConfigKeys("compliance_config.endpoints.", coreconfig.Datadog()) + logsConfigComplianceKeys := config.NewLogsConfigKeys("compliance_config.endpoints.", pkgconfigsetup.Datadog()) return newLogContext(logsConfigComplianceKeys, "cspm-intake.") } @@ -79,8 +79,8 @@ func startCompliance(senderManager sender.SenderManager, wmeta workloadmeta.Comp } stopper.Add(ctx) - configDir := coreconfig.Datadog().GetString("compliance_config.dir") - checkInterval := coreconfig.Datadog().GetDuration("compliance_config.check_interval") + configDir := pkgconfigsetup.Datadog().GetString("compliance_config.dir") + checkInterval := pkgconfigsetup.Datadog().GetDuration("compliance_config.check_interval") hname, err := hostname.Get(context.TODO()) if err != nil { diff --git a/cmd/cluster-agent/subcommands/status/command.go b/cmd/cluster-agent/subcommands/status/command.go index 7f4e3084cb6f8..c92a08d568461 100644 --- 
a/cmd/cluster-agent/subcommands/status/command.go +++ b/cmd/cluster-agent/subcommands/status/command.go @@ -25,7 +25,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -80,7 +80,7 @@ func run(log log.Component, config config.Component, cliParams *cliParams) error url := url.URL{ Scheme: "https", - Host: fmt.Sprintf("localhost:%v", pkgconfig.Datadog().GetInt("cluster_agent.cmd_port")), + Host: fmt.Sprintf("localhost:%v", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")), Path: "/status", RawQuery: v.Encode(), } diff --git a/cmd/dogstatsd/subcommands/start/command.go b/cmd/dogstatsd/subcommands/start/command.go index 103c2314d10d2..0e1193634a0b0 100644 --- a/cmd/dogstatsd/subcommands/start/command.go +++ b/cmd/dogstatsd/subcommands/start/command.go @@ -54,12 +54,13 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/runner" metadatarunnerimpl "github.com/DataDog/datadog-agent/comp/metadata/runner/runnerimpl" "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/fxutil" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" "github.com/DataDog/datadog-agent/pkg/util/optional" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -77,7 +78,7 @@ type DogstatsdComponents struct { const ( // loggerName is the name of the dogstatsd logger - loggerName pkgconfig.LoggerName = "DSD" + loggerName pkglogsetup.LoggerName = "DSD" ) // MakeCommand returns the start subcommand for the 'dogstatsd' command. 
@@ -148,10 +149,12 @@ func RunDogstatsdFct(cliParams *CLIParams, defaultConfPath string, defaultLogFil }), compressionimpl.Module(), - demultiplexerimpl.Module(), + demultiplexerimpl.Module(demultiplexerimpl.NewDefaultParams( + demultiplexerimpl.WithContinueOnMissingHostname(), + demultiplexerimpl.WithDogstatsdNoAggregationPipelineConfig(), + )), secretsimpl.Module(), - orchestratorForwarderImpl.Module(), - fx.Supply(orchestratorForwarderImpl.NewDisabledParams()), + orchestratorForwarderImpl.Module(orchestratorForwarderImpl.NewDisabledParams()), eventplatformimpl.Module(eventplatformimpl.NewDisabledParams()), eventplatformreceiverimpl.Module(), hostnameimpl.Module(), @@ -161,12 +164,6 @@ func RunDogstatsdFct(cliParams *CLIParams, defaultConfPath string, defaultLogFil fx.Provide(func(demuxInstance demultiplexer.Component) serializer.MetricSerializer { return demuxInstance.Serializer() }), - fx.Provide(func(config config.Component) demultiplexerimpl.Params { - params := demultiplexerimpl.NewDefaultParams() - params.EnableNoAggregationPipeline = config.GetBool("dogstatsd_no_aggregation_pipeline") - params.ContinueOnMissingHostname = true - return params - }), fx.Supply(resourcesimpl.Disabled()), metadatarunnerimpl.Module(), resourcesimpl.Module(), @@ -246,7 +243,7 @@ func RunDogstatsd(_ context.Context, cliParams *CLIParams, config config.Compone }() // Setup logger - syslogURI := pkgconfig.GetSyslogURI() + syslogURI := pkglogsetup.GetSyslogURI(pkgconfigsetup.Datadog()) logFile := config.GetString("log_file") if logFile == "" { logFile = params.DefaultLogFile @@ -257,7 +254,7 @@ func RunDogstatsd(_ context.Context, cliParams *CLIParams, config config.Compone logFile = "" } - err = pkgconfig.SetupLogger( + err = pkglogsetup.SetupLogger( loggerName, config.GetString("log_level"), logFile, @@ -265,6 +262,7 @@ func RunDogstatsd(_ context.Context, cliParams *CLIParams, config config.Compone config.GetBool("syslog_rfc"), config.GetBool("log_to_console"), config.GetBool("log_format_json"), + pkgconfigsetup.Datadog(), ) if err != nil { log.Criticalf("Unable to setup logger: %s", err) diff --git a/cmd/installer/command/command.go b/cmd/installer/command/command.go index a40f3926ed3ed..9e6d9ecdda890 100644 --- a/cmd/installer/command/command.go +++ b/cmd/installer/command/command.go @@ -13,7 +13,7 @@ import ( "github.com/fatih/color" "github.com/spf13/cobra" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // common constants for all the updater subcommands. @@ -51,7 +51,7 @@ type SubcommandFactory func(globalParams *GlobalParams) []*cobra.Command // MakeCommand makes the top-level Cobra command for this app. func MakeCommand(subcommandFactories []SubcommandFactory) *cobra.Command { globalParams := GlobalParams{ - ConfFilePath: config.DefaultUpdaterLogFile, + ConfFilePath: pkgconfigsetup.DefaultUpdaterLogFile, } // AgentCmd is the root command diff --git a/cmd/installer/main.go b/cmd/installer/main.go index 51bd2ce7bec57..8f9da11bc18f0 100644 --- a/cmd/installer/main.go +++ b/cmd/installer/main.go @@ -3,6 +3,8 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2023-present Datadog, Inc. +//go:build !bootstraper + // Package main implements 'installer'. 
package main diff --git a/cmd/installer/main_bootstraper.go b/cmd/installer/main_bootstraper.go new file mode 100644 index 0000000000000..a8d399a3856ed --- /dev/null +++ b/cmd/installer/main_bootstraper.go @@ -0,0 +1,28 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023-present Datadog, Inc. + +//go:build bootstraper + +// Package main implements 'installer'. +package main + +import ( + "fmt" + "os" + + "github.com/DataDog/datadog-agent/cmd/installer/subcommands/installer" + "github.com/DataDog/datadog-agent/cmd/installer/user" + "github.com/DataDog/datadog-agent/cmd/internal/runcmd" +) + +func main() { + if !user.IsRoot() { + fmt.Fprintln(os.Stderr, "This command requires root privileges.") + os.Exit(1) + } + cmd := installer.BootstrapCommand() + cmd.SilenceUsage = true + os.Exit(runcmd.Run(cmd)) +} diff --git a/cmd/installer/subcommands/daemon/api.go b/cmd/installer/subcommands/daemon/api.go index a55df5940aaf3..2b1eeb174a67f 100644 --- a/cmd/installer/subcommands/daemon/api.go +++ b/cmd/installer/subcommands/daemon/api.go @@ -91,7 +91,44 @@ func apiCommands(global *command.GlobalParams) []*cobra.Command { }) }, } - return []*cobra.Command{setCatalogCmd, startExperimentCmd, stopExperimentCmd, promoteExperimentCmd, installCmd} + startConfigExperimentCmd := &cobra.Command{ + Use: "start-config-experiment package version", + Aliases: []string{"start-config"}, + Short: "Starts an experiment", + Args: cobra.ExactArgs(2), + RunE: func(_ *cobra.Command, args []string) error { + return experimentFxWrapper(startConfig, &cliParams{ + GlobalParams: *global, + pkg: args[0], + version: args[1], + }) + }, + } + stopConfigExperimentCmd := &cobra.Command{ + Use: "stop-config-experiment package", + Aliases: []string{"stop-config"}, + Short: "Stops an experiment", + Args: cobra.ExactArgs(1), + RunE: func(_ *cobra.Command, args []string) error { + return experimentFxWrapper(stopConfig, &cliParams{ + GlobalParams: *global, + pkg: args[0], + }) + }, + } + promoteConfigExperimentCmd := &cobra.Command{ + Use: "promote-config-experiment package", + Aliases: []string{"promote-config"}, + Short: "Promotes an experiment", + Args: cobra.ExactArgs(1), + RunE: func(_ *cobra.Command, args []string) error { + return experimentFxWrapper(promoteConfig, &cliParams{ + GlobalParams: *global, + pkg: args[0], + }) + }, + } + return []*cobra.Command{setCatalogCmd, startExperimentCmd, stopExperimentCmd, promoteExperimentCmd, installCmd, startConfigExperimentCmd, stopConfigExperimentCmd, promoteConfigExperimentCmd} } func experimentFxWrapper(f interface{}, params *cliParams) error { @@ -144,6 +181,33 @@ func promote(params *cliParams, client localapiclient.Component) error { return nil } +func startConfig(params *cliParams, client localapiclient.Component) error { + err := client.StartConfigExperiment(params.pkg, params.version) + if err != nil { + fmt.Println("Error starting config experiment:", err) + return err + } + return nil +} + +func stopConfig(params *cliParams, client localapiclient.Component) error { + err := client.StopConfigExperiment(params.pkg) + if err != nil { + fmt.Println("Error stopping config experiment:", err) + return err + } + return nil +} + +func promoteConfig(params *cliParams, client localapiclient.Component) error { + err := client.PromoteConfigExperiment(params.pkg) + if err != nil { + fmt.Println("Error promoting config 
experiment:", err) + return err + } + return nil +} + func install(params *cliParams, client localapiclient.Component) error { err := client.Install(params.pkg, params.version) if err != nil { diff --git a/cmd/installer/subcommands/daemon/run.go b/cmd/installer/subcommands/daemon/run.go index d614a70a277eb..b69b6dad8a5fc 100644 --- a/cmd/installer/subcommands/daemon/run.go +++ b/cmd/installer/subcommands/daemon/run.go @@ -7,6 +7,7 @@ package daemon import ( "context" + "github.com/spf13/cobra" "go.uber.org/fx" @@ -23,8 +24,8 @@ import ( "github.com/DataDog/datadog-agent/comp/updater/localapi/localapiimpl" "github.com/DataDog/datadog-agent/comp/updater/telemetry/telemetryimpl" "github.com/DataDog/datadog-agent/comp/updater/updater/updaterimpl" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/remote/service" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func runCommand(global *command.GlobalParams) *cobra.Command { @@ -47,7 +48,7 @@ func getCommonFxOption(global *command.GlobalParams) fx.Option { ConfigParams: config.NewAgentParams(global.ConfFilePath), SecretParams: secrets.NewEnabledParams(), SysprobeConfigParams: sysprobeconfigimpl.NewParams(), - LogParams: log.ForDaemon("INSTALLER", "installer.log_file", pkgconfig.DefaultUpdaterLogFile), + LogParams: log.ForDaemon("INSTALLER", "installer.log_file", pkgconfigsetup.DefaultUpdaterLogFile), }), core.Bundle(), fx.Supply(&rcservice.Params{ diff --git a/cmd/installer/subcommands/daemon/status.tmpl b/cmd/installer/subcommands/daemon/status.tmpl index 714ed431f3214..045b819d53764 100644 --- a/cmd/installer/subcommands/daemon/status.tmpl +++ b/cmd/installer/subcommands/daemon/status.tmpl @@ -7,16 +7,38 @@ Datadog Installer v{{ htmlSafe .Version }} {{ greenText "●" }} stable: v{{ htmlSafe $package.Stable }} {{- else }} {{ redText "●" }} stable: none - {{- end }}{{ if $package.Experiment }} + {{- end }} + {{- if $package.Experiment }} {{ yellowText "●" }} experiment: v{{ htmlSafe $package.Experiment }} {{- else }} ● experiment: none {{- end }} - {{- if eq $name "datadog-apm-inject" }}{{ template "datadog-apm-inject" $.ApmInjectionStatus }}{{ end }} + + {{- if eq $name "datadog-apm-inject" }} + {{ template "datadog-apm-inject" $.ApmInjectionStatus }} + {{- end }} + + {{- range $remoteConfig := $.RemoteConfigState }} + {{- if eq $remoteConfig.Package $name }} + Remote configuration client state: + StableVersion: {{ $remoteConfig.StableVersion }} + ExperimentVersion: {{ $remoteConfig.ExperimentVersion }} + StableConfigVersion: {{ $remoteConfig.StableConfigVersion }} + ExperimentConfigVersion: {{ $remoteConfig.ExperimentConfigVersion }} + RemoteConfigVersion: {{ $remoteConfig.RemoteConfigVersion }} + Task: + {{- if $remoteConfig.Task }} + Id: {{ $remoteConfig.Task.Id }} + State: {{ $remoteConfig.Task.State }} + {{- if $remoteConfig.Task.Error }} + Error: {{ $remoteConfig.Task.Error }} + {{- end }} + {{- else }} + No task available + {{- end }} + {{- end }} + {{- end }} {{ end -}} -{{- if .RemoteConfigState }} -{{ template "remote-config-state" $.RemoteConfigState }} -{{- end -}} {{- define "datadog-apm-inject" }} Instrumentation status: @@ -32,26 +54,4 @@ Datadog Installer v{{ htmlSafe .Version }} {{- else -}} {{ redText "●" }} Docker: Not instrumented {{- end }} -{{- end -}} - -{{- define "remote-config-state" }} - Remote configuration client state: - {{ range . 
}} - {{ boldText .Package }} - StableVersion: {{ .StableVersion }} - ExperimentVersion: {{ .ExperimentVersion }} - StableConfigVersion: {{ .StableConfigVersion }} - ExperimentConfigVersion: {{ .ExperimentConfigVersion }} - RemoteConfigVersion: {{ .RemoteConfigVersion }} - Task: - {{- if .Task }} - Id: {{ .Task.Id }} - State: {{ .Task.State }} - {{- if .Task.Error }} - Error: {{ .Task.Error }} - {{- end }} - {{- else }} - No task available - {{- end }} - {{ end }} {{- end }} diff --git a/cmd/installer/subcommands/installer/command.go b/cmd/installer/subcommands/installer/command.go index 32ba5484edd54..eb05c41908490 100644 --- a/cmd/installer/subcommands/installer/command.go +++ b/cmd/installer/subcommands/installer/command.go @@ -46,15 +46,24 @@ const ( envAgentDistChannel = "DD_AGENT_DIST_CHANNEL" ) +// BootstrapCommand returns the bootstrap command. +func BootstrapCommand() *cobra.Command { + return bootstrapCommand() +} + // Commands returns the installer subcommands. func Commands(_ *command.GlobalParams) []*cobra.Command { return []*cobra.Command{ bootstrapCommand(), installCommand(), + setupCommand(), removeCommand(), installExperimentCommand(), removeExperimentCommand(), promoteExperimentCommand(), + installConfigExperimentCommand(), + removeConfigExperimentCommand(), + promoteConfigExperimentCommand(), garbageCollectCommand(), purgeCommand(), isInstalledCommand(), @@ -109,7 +118,7 @@ func newInstallerCmd(operation string) (_ *installerCmd, err error) { cmd.Stop(err) } }() - i, err := installer.NewInstaller(cmd.env) + i, err := installer.NewInstaller(cmd.env, "opt/datadog-packages/run/rc/cmd") if err != nil { return nil, err } @@ -119,6 +128,14 @@ func newInstallerCmd(operation string) (_ *installerCmd, err error) { }, nil } +func (i *installerCmd) stop(err error) { + i.cmd.Stop(err) + err = i.Installer.Close() + if err != nil { + fmt.Fprintf(os.Stderr, "failed to close Installer: %v\n", err) + } +} + type bootstraperCmd struct { *cmd } @@ -149,7 +166,6 @@ func newBootstraperCmd(operation string) *bootstraperCmd { func newTelemetry(env *env.Env) *telemetry.Telemetry { if env.APIKey == "" { - fmt.Printf("telemetry disabled: missing DD_API_KEY\n") return nil } t, err := telemetry.NewTelemetry(env, "datadog-installer") @@ -216,6 +232,24 @@ func bootstrapCommand() *cobra.Command { return cmd } +func setupCommand() *cobra.Command { + var timeout time.Duration + cmd := &cobra.Command{ + Use: "setup", + Hidden: true, + GroupID: "installer", + RunE: func(_ *cobra.Command, _ []string) (err error) { + cmd := newCmd("setup") + defer func() { cmd.Stop(err) }() + ctx, cancel := context.WithTimeout(cmd.ctx, timeout) + defer cancel() + return installer.Setup(ctx, cmd.env) + }, + } + cmd.Flags().DurationVarP(&timeout, "timeout", "T", 3*time.Minute, "timeout to install with") + return cmd +} + func installCommand() *cobra.Command { var installArgs []string cmd := &cobra.Command{ @@ -228,7 +262,7 @@ func installCommand() *cobra.Command { if err != nil { return err } - defer func() { i.Stop(err) }() + defer i.stop(err) i.span.SetTag("params.url", args[0]) return i.Install(i.ctx, args[0], installArgs) }, @@ -248,7 +282,7 @@ func removeCommand() *cobra.Command { if err != nil { return err } - defer func() { i.Stop(err) }() + defer i.stop(err) i.span.SetTag("params.package", args[0]) return i.Remove(i.ctx, args[0]) }, @@ -267,7 +301,7 @@ func purgeCommand() *cobra.Command { if err != nil { return err } - defer func() { i.Stop(err) }() + defer i.stop(err) i.Purge(i.ctx) return nil }, @@ -286,7 +320,7 @@ 
func installExperimentCommand() *cobra.Command { if err != nil { return err } - defer func() { i.Stop(err) }() + defer i.stop(err) i.span.SetTag("params.url", args[0]) return i.InstallExperiment(i.ctx, args[0]) }, @@ -305,7 +339,7 @@ func removeExperimentCommand() *cobra.Command { if err != nil { return err } - defer func() { i.Stop(err) }() + defer i.stop(err) i.span.SetTag("params.package", args[0]) return i.RemoveExperiment(i.ctx, args[0]) }, @@ -324,7 +358,7 @@ func promoteExperimentCommand() *cobra.Command { if err != nil { return err } - defer func() { i.Stop(err) }() + defer i.stop(err) i.span.SetTag("params.package", args[0]) return i.PromoteExperiment(i.ctx, args[0]) }, @@ -332,6 +366,64 @@ func promoteExperimentCommand() *cobra.Command { return cmd } +func installConfigExperimentCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "install-config-experiment ", + Short: "Install a config experiment", + GroupID: "installer", + Args: cobra.ExactArgs(2), + RunE: func(_ *cobra.Command, args []string) (err error) { + i, err := newInstallerCmd("install_config_experiment") + if err != nil { + return err + } + defer func() { i.Stop(err) }() + i.span.SetTag("params.package", args[0]) + i.span.SetTag("params.version", args[1]) + return i.InstallConfigExperiment(i.ctx, args[0], args[1]) + }, + } + return cmd +} + +func removeConfigExperimentCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "remove-config-experiment ", + Short: "Remove a config experiment", + GroupID: "installer", + Args: cobra.ExactArgs(1), + RunE: func(_ *cobra.Command, args []string) (err error) { + i, err := newInstallerCmd("remove_config_experiment") + if err != nil { + return err + } + defer func() { i.Stop(err) }() + i.span.SetTag("params.package", args[0]) + return i.RemoveConfigExperiment(i.ctx, args[0]) + }, + } + return cmd +} + +func promoteConfigExperimentCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "promote-config-experiment ", + Short: "Promote a config experiment", + GroupID: "installer", + Args: cobra.ExactArgs(1), + RunE: func(_ *cobra.Command, args []string) (err error) { + i, err := newInstallerCmd("promote_config_experiment") + if err != nil { + return err + } + defer func() { i.Stop(err) }() + i.span.SetTag("params.package", args[0]) + return i.PromoteConfigExperiment(i.ctx, args[0]) + }, + } + return cmd +} + func garbageCollectCommand() *cobra.Command { cmd := &cobra.Command{ Use: "garbage-collect", @@ -343,7 +435,7 @@ func garbageCollectCommand() *cobra.Command { if err != nil { return err } - defer func() { i.Stop(err) }() + defer i.stop(err) return i.GarbageCollect(i.ctx) }, } @@ -366,7 +458,7 @@ func isInstalledCommand() *cobra.Command { if err != nil { return err } - defer func() { i.Stop(err) }() + defer i.stop(err) installed, err := i.IsInstalled(i.ctx, args[0]) if err != nil { return err diff --git a/cmd/otel-agent/config/agent_config_test.go b/cmd/otel-agent/config/agent_config_test.go index c985c24859da2..fc6b4dd6b21ff 100644 --- a/cmd/otel-agent/config/agent_config_test.go +++ b/cmd/otel-agent/config/agent_config_test.go @@ -7,6 +7,8 @@ package config import ( "context" + "fmt" + "io/fs" "os" "strings" "testing" @@ -174,15 +176,25 @@ func (suite *ConfigTestSuite) TestEnvBadLogLevel() { assert.Error(t, err) } +func (suite *ConfigTestSuite) TestBadDDConfigFile() { + t := suite.T() + fileName := "testdata/config_default.yaml" + ddFileName := "testdata/doesnotexists.yaml" + _, err := NewConfigComponent(context.Background(), ddFileName, []string{fileName}) + + 
assert.ErrorIs(t, err, fs.ErrNotExist) +} + func (suite *ConfigTestSuite) TestBadLogLevel() { t := suite.T() fileName := "testdata/config_default.yaml" ddFileName := "testdata/datadog_bad_log_level.yaml" _, err := NewConfigComponent(context.Background(), ddFileName, []string{fileName}) - // log_level from service config takes precedence -> more verbose - // ddFlleName configures level warn, Telemetry defaults to info - assert.Error(t, err) + expectedError := fmt.Sprintf( + "invalid log level (%v) set in the Datadog Agent configuration", + pkgconfigsetup.Datadog().GetString("log_level")) + assert.ErrorContains(t, err, expectedError) } func (suite *ConfigTestSuite) TestNoDDExporter() { diff --git a/cmd/otel-agent/config/testdata/datadog_bad_log_level.yaml b/cmd/otel-agent/config/testdata/datadog_bad_log_level.yaml new file mode 100644 index 0000000000000..be99720314f33 --- /dev/null +++ b/cmd/otel-agent/config/testdata/datadog_bad_log_level.yaml @@ -0,0 +1,5 @@ +log_level: yabadabadoo + +otelcollector: + enabled: true + extension_url: "https://localhost:8888" diff --git a/cmd/otel-agent/main.go b/cmd/otel-agent/main.go index 716aafbe70023..9d75583cf85ce 100644 --- a/cmd/otel-agent/main.go +++ b/cmd/otel-agent/main.go @@ -14,6 +14,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/internal/runcmd" "github.com/DataDog/datadog-agent/cmd/otel-agent/command" "github.com/DataDog/datadog-agent/pkg/util/flavor" + _ "github.com/DataDog/datadog-agent/pkg/version" ) func main() { diff --git a/cmd/otel-agent/subcommands/run/command.go b/cmd/otel-agent/subcommands/run/command.go index 2373bc4453363..10f85edd4ca67 100644 --- a/cmd/otel-agent/subcommands/run/command.go +++ b/cmd/otel-agent/subcommands/run/command.go @@ -37,7 +37,6 @@ import ( collectorcontribFx "github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/fx" collectordef "github.com/DataDog/datadog-agent/comp/otelcol/collector/def" collectorfx "github.com/DataDog/datadog-agent/comp/otelcol/collector/fx" - configstorefx "github.com/DataDog/datadog-agent/comp/otelcol/configstore/fx" converter "github.com/DataDog/datadog-agent/comp/otelcol/converter/def" converterfx "github.com/DataDog/datadog-agent/comp/otelcol/converter/fx" "github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline" @@ -107,7 +106,6 @@ func runOTelAgentCommand(ctx context.Context, params *subcommands.GlobalParams, fx.Provide(func(cp converter.Component) confmap.Converter { return cp }), - configstorefx.Module(), fx.Provide(func() (coreconfig.Component, error) { c, err := agentConfig.NewConfigComponent(context.Background(), params.CoreConfPath, params.ConfPaths) if err != nil { diff --git a/cmd/process-agent/command/command.go b/cmd/process-agent/command/command.go index 649a1c21cf614..4cc6f355b30a0 100644 --- a/cmd/process-agent/command/command.go +++ b/cmd/process-agent/command/command.go @@ -18,16 +18,17 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/filesystem" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) //nolint:revive // TODO(PROC) Fix revive linter -const LoggerName config.LoggerName = "PROCESS" +const LoggerName pkglogsetup.LoggerName = "PROCESS" // DaemonLogParams are the 
log params should be given to the `core.BundleParams` for when the process agent is running as a daemon -var DaemonLogParams = log.ForDaemon(string(LoggerName), "process_config.log_file", config.DefaultProcessAgentLogFile) +var DaemonLogParams = log.ForDaemon(string(LoggerName), "process_config.log_file", pkgconfigsetup.DefaultProcessAgentLogFile) // OneShotLogParams are the log params that are given to commands var OneShotLogParams = log.ForOneShot(string(LoggerName), "info", true) diff --git a/cmd/process-agent/command/main_common.go b/cmd/process-agent/command/main_common.go index 73daed01b341b..cd0fc37d6cb33 100644 --- a/cmd/process-agent/command/main_common.go +++ b/cmd/process-agent/command/main_common.go @@ -52,8 +52,8 @@ import ( remoteconfig "github.com/DataDog/datadog-agent/comp/remote-config" "github.com/DataDog/datadog-agent/comp/remote-config/rcclient" "github.com/DataDog/datadog-agent/pkg/collector/python" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + "github.com/DataDog/datadog-agent/pkg/config/model" commonsettings "github.com/DataDog/datadog-agent/pkg/config/settings" "github.com/DataDog/datadog-agent/pkg/process/metadata/workloadmeta/collector" "github.com/DataDog/datadog-agent/pkg/process/util" @@ -174,7 +174,10 @@ func runApp(ctx context.Context, globalParams *GlobalParams) error { // Provide the corresponding tagger Params to configure the tagger fx.Provide(func(c config.Component) tagger.Params { - if c.GetBool("process_config.remote_tagger") { + if c.GetBool("process_config.remote_tagger") || + // If the agent is running in ECS or ECS Fargate and the ECS task collection is enabled, use the remote tagger + // as remote tagger can return more tags than the local tagger. + ((env.IsECS() || env.IsECSFargate()) && c.GetBool("ecs_task_collection_enabled")) { return tagger.NewNodeRemoteTaggerParams() } return tagger.NewTaggerParams() @@ -312,7 +315,7 @@ func initMisc(deps miscDeps) error { // shouldStayAlive determines whether the process agent should stay alive when no checks are running. // This can happen when the checks are running on the core agent but a process agent container is // still brought up. The process-agent is kept alive to prevent crash loops. -func shouldStayAlive(cfg ddconfig.Reader) bool { +func shouldStayAlive(cfg model.Reader) bool { if env.IsKubernetes() && cfg.GetBool("process_config.run_in_core_agent.enabled") { log.Warn("The process-agent is staying alive to prevent crash loops due to the checks running on the core agent. Thus, the process-agent is idle. 
Update your Helm chart or Datadog Operator to the latest version to prevent this (https://docs.datadoghq.com/containers/kubernetes/installation/).") return true diff --git a/cmd/process-agent/subcommands/check/check.go b/cmd/process-agent/subcommands/check/check.go index d91447f346d19..be338e8df2252 100644 --- a/cmd/process-agent/subcommands/check/check.go +++ b/cmd/process-agent/subcommands/check/check.go @@ -182,10 +182,12 @@ func RunCheckCmd(deps Dependencies) error { names = append(names, ch.Name()) _, processModuleEnabled := deps.Syscfg.SysProbeObject().EnabledModules[sysconfig.ProcessModule] + _, networkTracerModuleEnabled := deps.Syscfg.SysProbeObject().EnabledModules[sysconfig.NetworkTracerModule] cfg := &checks.SysProbeConfig{ - MaxConnsPerMessage: deps.Syscfg.SysProbeObject().MaxConnsPerMessage, - SystemProbeAddress: deps.Syscfg.SysProbeObject().SocketAddress, - ProcessModuleEnabled: processModuleEnabled, + MaxConnsPerMessage: deps.Syscfg.SysProbeObject().MaxConnsPerMessage, + SystemProbeAddress: deps.Syscfg.SysProbeObject().SocketAddress, + ProcessModuleEnabled: processModuleEnabled, + NetworkTracerModuleEnabled: networkTracerModuleEnabled, } if !matchingCheck(deps.CliParams.checkName, ch) { diff --git a/cmd/process-agent/subcommands/config/config.go b/cmd/process-agent/subcommands/config/config.go index e92c853ab5799..1acd0ec15e5e2 100644 --- a/cmd/process-agent/subcommands/config/config.go +++ b/cmd/process-agent/subcommands/config/config.go @@ -18,10 +18,11 @@ import ( "github.com/DataDog/datadog-agent/comp/process" "github.com/DataDog/datadog-agent/pkg/api/util" apiutil "github.com/DataDog/datadog-agent/pkg/api/util" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/fetcher" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/config/settings" settingshttp "github.com/DataDog/datadog-agent/pkg/config/settings/http" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -179,9 +180,9 @@ func getConfigValue(deps dependencies, args []string) error { return nil } -func getClient(cfg ddconfig.Reader) (settings.Client, error) { +func getClient(cfg model.Reader) (settings.Client, error) { httpClient := apiutil.GetClient(false) - ipcAddress, err := ddconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) port := cfg.GetInt("process_config.cmd_port") if port <= 0 { diff --git a/cmd/process-agent/subcommands/status/status.go b/cmd/process-agent/subcommands/status/status.go index 99e0c03741f97..2660ee8fb71b4 100644 --- a/cmd/process-agent/subcommands/status/status.go +++ b/cmd/process-agent/subcommands/status/status.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/comp/process" apiutil "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/collector/python" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/util/status" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -134,7 +134,7 @@ func getAndWriteStatus(log log.Component, statusURL string, w io.Writer) { } func getStatusURL() (string, error) { - addressPort, err := ddconfig.GetProcessAPIAddressPort() + addressPort, err := pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) if err != nil { return "", fmt.Errorf("config error: %s", err.Error()) } 
diff --git a/cmd/process-agent/subcommands/status/status_test.go b/cmd/process-agent/subcommands/status/status_test.go index dfee6c3d3eb33..e6aabd23aeb17 100644 --- a/cmd/process-agent/subcommands/status/status_test.go +++ b/cmd/process-agent/subcommands/status/status_test.go @@ -19,8 +19,8 @@ import ( "github.com/DataDog/datadog-agent/cmd/process-agent/command" hostMetadataUtils "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/utils" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/util/status" "github.com/DataDog/datadog-agent/pkg/trace/log" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -66,7 +66,7 @@ func TestNotRunning(t *testing.T) { cfg := configmock.New(t) cfg.SetWithoutSource("process_config.cmd_port", 8082) - addressPort, err := config.GetProcessAPIAddressPort() + addressPort, err := pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) require.NoError(t, err) statusURL := fmt.Sprintf("http://%s/agent/status", addressPort) @@ -81,7 +81,7 @@ func TestNotRunning(t *testing.T) { func TestError(t *testing.T) { cfg := configmock.New(t) cfg.SetWithoutSource("cmd_host", "8.8.8.8") // Non-local ip address will cause error in `GetIPCAddress` - _, ipcError := config.GetIPCAddress() + _, ipcError := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) var errText, expectedErrText strings.Builder url, err := getStatusURL() diff --git a/cmd/process-agent/subcommands/taggerlist/tagger_list.go b/cmd/process-agent/subcommands/taggerlist/tagger_list.go index 07222e4612555..8d0f55ee18c52 100644 --- a/cmd/process-agent/subcommands/taggerlist/tagger_list.go +++ b/cmd/process-agent/subcommands/taggerlist/tagger_list.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/api" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -62,7 +62,7 @@ func taggerList(deps dependencies) error { } func getTaggerURL() (string, error) { - addressPort, err := ddconfig.GetProcessAPIAddressPort() + addressPort, err := pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) if err != nil { return "", fmt.Errorf("config error: %s", err.Error()) } diff --git a/cmd/process-agent/subcommands/workloadlist/command.go b/cmd/process-agent/subcommands/workloadlist/command.go index c17d82429bb8e..a123e4c4f9fa2 100644 --- a/cmd/process-agent/subcommands/workloadlist/command.go +++ b/cmd/process-agent/subcommands/workloadlist/command.go @@ -20,7 +20,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/api/util" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -86,7 +86,7 @@ func workloadList(_ log.Component, config config.Component, cliParams *cliParams } func workloadURL(verbose bool) (string, error) { - addressPort, err := ddconfig.GetProcessAPIAddressPort() + addressPort, err := pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) if err != nil { return "", 
fmt.Errorf("config error: %s", err.Error()) } diff --git a/cmd/security-agent/api/agent/agent.go b/cmd/security-agent/api/agent/agent.go index 46f54069f66af..06ad276eae538 100644 --- a/cmd/security-agent/api/agent/agent.go +++ b/cmd/security-agent/api/agent/agent.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/settings" "github.com/DataDog/datadog-agent/comp/core/status" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/flare" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -140,7 +140,7 @@ func (a *Agent) getHealth(w http.ResponseWriter, _ *http.Request) { func (a *Agent) makeFlare(w http.ResponseWriter, _ *http.Request) { log.Infof("Making a flare") w.Header().Set("Content-Type", "application/json") - logFile := config.Datadog().GetString("security_agent.log_file") + logFile := pkgconfigsetup.Datadog().GetString("security_agent.log_file") filePath, err := flare.CreateSecurityAgentArchive(false, logFile, a.statusComponent) if err != nil || filePath == "" { diff --git a/cmd/security-agent/api/listener.go b/cmd/security-agent/api/listener.go index 822c86afd7b0c..7f5d5658e54d8 100644 --- a/cmd/security-agent/api/listener.go +++ b/cmd/security-agent/api/listener.go @@ -9,14 +9,14 @@ import ( "fmt" "net" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // newListener creates a listening connection func newListener() (net.Listener, error) { - address, err := config.GetIPCAddress() + address, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } - return net.Listen("tcp", fmt.Sprintf("%v:%v", address, config.Datadog().GetInt("security_agent.cmd_port"))) + return net.Listen("tcp", fmt.Sprintf("%v:%v", address, pkgconfigsetup.Datadog().GetInt("security_agent.cmd_port"))) } diff --git a/cmd/security-agent/api/server.go b/cmd/security-agent/api/server.go index 60bd02ceb05ea..3b44b83e39436 100644 --- a/cmd/security-agent/api/server.go +++ b/cmd/security-agent/api/server.go @@ -29,7 +29,8 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) // Server implements security agent API server @@ -61,7 +62,7 @@ func (s *Server) Start() error { // Validate token for every request r.Use(validateToken) - err := util.CreateAndSetAuthToken(config.Datadog()) + err := util.CreateAndSetAuthToken(pkgconfigsetup.Datadog()) if err != nil { return err } @@ -89,13 +90,13 @@ func (s *Server) Start() error { } // Use a stack depth of 4 on top of the default one to get a relevant filename in the stdlib - logWriter, _ := config.NewLogWriter(4, seelog.ErrorLvl) + logWriter, _ := pkglogsetup.NewLogWriter(4, seelog.ErrorLvl) srv := &http.Server{ Handler: r, ErrorLog: stdLog.New(logWriter, "Error from the agent http API server: ", 0), // log errors to seelog, TLSConfig: &tlsConfig, - WriteTimeout: config.Datadog().GetDuration("server_timeout") * time.Second, + WriteTimeout: pkgconfigsetup.Datadog().GetDuration("server_timeout") * time.Second, 
} tlsListener := tls.NewListener(s.listener, &tlsConfig) diff --git a/cmd/security-agent/subcommands/config/config.go b/cmd/security-agent/subcommands/config/config.go index 4ea160937b704..f001e4cf66e3d 100644 --- a/cmd/security-agent/subcommands/config/config.go +++ b/cmd/security-agent/subcommands/config/config.go @@ -20,10 +20,10 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/fetcher" "github.com/DataDog/datadog-agent/pkg/config/settings" settingshttp "github.com/DataDog/datadog-agent/pkg/config/settings/http" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -147,13 +147,13 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { return []*cobra.Command{cmd} } func getSettingsClient(_ *cobra.Command, _ []string) (settings.Client, error) { - err := util.SetAuthToken(pkgconfig.Datadog()) + err := util.SetAuthToken(pkgconfigsetup.Datadog()) if err != nil { return nil, err } c := util.GetClient(false) - apiConfigURL := fmt.Sprintf("https://localhost:%v/agent/config", pkgconfig.Datadog().GetInt("security_agent.cmd_port")) + apiConfigURL := fmt.Sprintf("https://localhost:%v/agent/config", pkgconfigsetup.Datadog().GetInt("security_agent.cmd_port")) return settingshttp.NewClient(c, apiConfigURL, "security-agent", settingshttp.NewHTTPClientOptions(util.LeaveConnectionOpen)), nil } diff --git a/cmd/security-agent/subcommands/runtime/activity_dump.go b/cmd/security-agent/subcommands/runtime/activity_dump.go index 46465a0986dd2..514cdda1bc612 100644 --- a/cmd/security-agent/subcommands/runtime/activity_dump.go +++ b/cmd/security-agent/subcommands/runtime/activity_dump.go @@ -9,11 +9,13 @@ package runtime import ( + "encoding/json" "fmt" "os" "github.com/spf13/cobra" "go.uber.org/fx" + "gopkg.in/yaml.v2" "github.com/DataDog/datadog-agent/cmd/security-agent/command" "github.com/DataDog/datadog-agent/comp/core" @@ -23,6 +25,7 @@ import ( secagent "github.com/DataDog/datadog-agent/pkg/security/agent" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/proto/api" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" activity_tree "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree" "github.com/DataDog/datadog-agent/pkg/security/security_profile/dump" "github.com/DataDog/datadog-agent/pkg/security/utils" @@ -57,6 +60,8 @@ func activityDumpCommands(globalParams *command.GlobalParams) []*cobra.Command { activityDumpCmd.AddCommand(listCommands(globalParams)...) activityDumpCmd.AddCommand(stopCommands(globalParams)...) activityDumpCmd.AddCommand(diffCommands(globalParams)...) + activityDumpCmd.AddCommand(activityDumpToWorkloadPolicyCommands(globalParams)...) + activityDumpCmd.AddCommand(activityDumpToSeccompProfileCommands(globalParams)...) 
return []*cobra.Command{activityDumpCmd} } @@ -615,3 +620,246 @@ func stopActivityDump(_ log.Component, _ config.Component, _ secrets.Component, fmt.Println("done!") return nil } + +type activityDumpToWorkloadPolicyCliParams struct { + *command.GlobalParams + + input string + output string + kill bool + allowlist bool + lineage bool + service string + imageName string + imageTag string + fim bool +} + +func activityDumpToWorkloadPolicyCommands(globalParams *command.GlobalParams) []*cobra.Command { + cliParams := &activityDumpToWorkloadPolicyCliParams{ + GlobalParams: globalParams, + } + + ActivityDumpWorkloadPolicyCmd := &cobra.Command{ + Use: "workload-policy", + Hidden: true, + Short: "convert an activity dump to a workload policy", + RunE: func(_ *cobra.Command, _ []string) error { + return fxutil.OneShot(activityDumpToWorkloadPolicy, + fx.Supply(cliParams), + fx.Supply(core.BundleParams{ + ConfigParams: config.NewSecurityAgentParams(globalParams.ConfigFilePaths), + SecretParams: secrets.NewEnabledParams(), + LogParams: log.ForOneShot(command.LoggerName, "info", true)}), + core.Bundle(), + ) + }, + } + + ActivityDumpWorkloadPolicyCmd.Flags().StringVar( + &cliParams.input, + "input", + "", + "path to the activity-dump file", + ) + + ActivityDumpWorkloadPolicyCmd.Flags().StringVar( + &cliParams.output, + "output", + "", + "path to the generated workload policy file", + ) + + ActivityDumpWorkloadPolicyCmd.Flags().BoolVar( + &cliParams.kill, + "kill", + false, + "generate kill action with the workload policy", + ) + + ActivityDumpWorkloadPolicyCmd.Flags().BoolVar( + &cliParams.fim, + "fim", + false, + "generate fim rules with the workload policy", + ) + + ActivityDumpWorkloadPolicyCmd.Flags().BoolVar( + &cliParams.allowlist, + "allowlist", + false, + "generate allow list rules", + ) + + ActivityDumpWorkloadPolicyCmd.Flags().BoolVar( + &cliParams.lineage, + "lineage", + false, + "generate lineage rules", + ) + + ActivityDumpWorkloadPolicyCmd.Flags().StringVar( + &cliParams.service, + "service", + "", + "apply on specified service", + ) + + ActivityDumpWorkloadPolicyCmd.Flags().StringVar( + &cliParams.imageTag, + "image-tag", + "", + "apply on specified image tag", + ) + + ActivityDumpWorkloadPolicyCmd.Flags().StringVar( + &cliParams.imageName, + "image-name", + "", + "apply on specified image name", + ) + + return []*cobra.Command{ActivityDumpWorkloadPolicyCmd} +} + +func activityDumpToWorkloadPolicy(_ log.Component, _ config.Component, _ secrets.Component, args *activityDumpToWorkloadPolicyCliParams) error { + + opts := dump.SECLRuleOpts{ + EnableKill: args.kill, + AllowList: args.allowlist, + Lineage: args.lineage, + Service: args.service, + ImageName: args.imageName, + ImageTag: args.imageTag, + FIM: args.fim, + } + + ads, err := dump.LoadActivityDumpsFromFiles(args.input) + if err != nil { + return err + } + + generatedRules := dump.GenerateRules(ads, opts) + generatedRules = utils.BuildPatterns(generatedRules) + + policyDef := rules.PolicyDef{ + Rules: generatedRules, + } + + // Verify policy syntax + var policyName string + if len(args.imageName) > 0 { + policyName = fmt.Sprintf("%s_policy", args.imageName) + } else { + policyName = "workload_policy" + } + policy, err := rules.LoadPolicyFromDefinition(policyName, "workload", &policyDef, nil, nil) + + if err != nil { + return fmt.Errorf("error in generated ruleset's syntax: '%s'", err) + } + + b, err := yaml.Marshal(policy) + if err != nil { + return err + } + + output := os.Stdout + if args.output != "" && args.output != "-" { + 
output, err = os.Create(args.output) + if err != nil { + return err + } + defer output.Close() + } + + fmt.Fprint(output, string(b)) + + return nil +} + +type activityDumpToSeccompProfileCliParams struct { + *command.GlobalParams + + input string + output string + format string +} + +func activityDumpToSeccompProfileCommands(globalParams *command.GlobalParams) []*cobra.Command { + cliParams := &activityDumpToSeccompProfileCliParams{ + GlobalParams: globalParams, + } + + ActivityDumpToSeccompProfileCmd := &cobra.Command{ + Use: "workload-seccomp", + Hidden: true, + Short: "convert an activity dump to a seccomp profile", + RunE: func(_ *cobra.Command, _ []string) error { + return fxutil.OneShot(activityDumpToSeccompProfile, + fx.Supply(cliParams), + fx.Supply(core.BundleParams{ + ConfigParams: config.NewSecurityAgentParams(globalParams.ConfigFilePaths), + SecretParams: secrets.NewEnabledParams(), + LogParams: log.ForOneShot(command.LoggerName, "info", true)}), + core.Bundle(), + ) + }, + } + + ActivityDumpToSeccompProfileCmd.Flags().StringVar( + &cliParams.input, + "input", + "", + "path to the activity-dump file", + ) + + ActivityDumpToSeccompProfileCmd.Flags().StringVar( + &cliParams.output, + "output", + "", + "path to the generated seccomp profile file", + ) + + ActivityDumpToSeccompProfileCmd.Flags().StringVar( + &cliParams.format, + "format", + "json", + "format of the generated seccomp profile file", + ) + + return []*cobra.Command{ActivityDumpToSeccompProfileCmd} +} +func activityDumpToSeccompProfile(_ log.Component, _ config.Component, _ secrets.Component, args *activityDumpToSeccompProfileCliParams) error { + + ads, err := dump.LoadActivityDumpsFromFiles(args.input) + if err != nil { + return err + } + + seccompProfile := dump.GenerateSeccompProfile(ads) + + var b []byte + if args.format == "yaml" { + b, err = yaml.Marshal(seccompProfile) + } else { + b, err = json.Marshal(seccompProfile) + } + + if err != nil { + return err + } + + output := os.Stdout + if args.output != "" && args.output != "-" { + output, err = os.Create(args.output) + if err != nil { + return err + } + defer output.Close() + } + + fmt.Fprint(output, string(b)) + + return nil +} diff --git a/cmd/security-agent/subcommands/runtime/activity_dump_test.go b/cmd/security-agent/subcommands/runtime/activity_dump_test.go index ed1bca1cdf6f1..555461135dee5 100644 --- a/cmd/security-agent/subcommands/runtime/activity_dump_test.go +++ b/cmd/security-agent/subcommands/runtime/activity_dump_test.go @@ -53,3 +53,19 @@ func TestDumpActivityDumpCommand(t *testing.T) { generateActivityDump, func() {}) } + +func TestActivityDumpToWorkloadPolicyCommand(t *testing.T) { + fxutil.TestOneShotSubcommand(t, + Commands(&command.GlobalParams{}), + []string{"runtime", "activity-dump", "workload-policy", "--input", "file"}, + activityDumpToWorkloadPolicy, + func() {}) +} + +func TestActivityDumpToSeccompProfileCommand(t *testing.T) { + fxutil.TestOneShotSubcommand(t, + Commands(&command.GlobalParams{}), + []string{"runtime", "activity-dump", "workload-seccomp", "--input", "file", "--output", "file"}, + activityDumpToSeccompProfile, + func() {}) +} diff --git a/cmd/security-agent/subcommands/runtime/command.go b/cmd/security-agent/subcommands/runtime/command.go index 7725d89c81e44..ec5196e7aab12 100644 --- a/cmd/security-agent/subcommands/runtime/command.go +++ b/cmd/security-agent/subcommands/runtime/command.go @@ -33,7 +33,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" 
"github.com/DataDog/datadog-agent/comp/core/secrets" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" secagent "github.com/DataDog/datadog-agent/pkg/security/agent" "github.com/DataDog/datadog-agent/pkg/security/common" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" @@ -105,7 +105,7 @@ func evalCommands(globalParams *command.GlobalParams) []*cobra.Command { }, } - evalCmd.Flags().StringVar(&evalArgs.dir, "policies-dir", pkgconfig.DefaultRuntimePoliciesDir, "Path to policies directory") + evalCmd.Flags().StringVar(&evalArgs.dir, "policies-dir", pkgconfigsetup.DefaultRuntimePoliciesDir, "Path to policies directory") evalCmd.Flags().StringVar(&evalArgs.ruleID, "rule-id", "", "Rule ID to evaluate") _ = evalCmd.MarkFlagRequired("rule-id") evalCmd.Flags().StringVar(&evalArgs.eventFile, "event-file", "", "File of the event data") @@ -138,7 +138,7 @@ func commonCheckPoliciesCommands(globalParams *command.GlobalParams) []*cobra.Co }, } - commonCheckPoliciesCmd.Flags().StringVar(&cliParams.dir, "policies-dir", pkgconfig.DefaultRuntimePoliciesDir, "Path to policies directory") + commonCheckPoliciesCmd.Flags().StringVar(&cliParams.dir, "policies-dir", pkgconfigsetup.DefaultRuntimePoliciesDir, "Path to policies directory") commonCheckPoliciesCmd.Flags().BoolVar(&cliParams.evaluateAllPolicySources, "loaded-policies", false, "Evaluate loaded policies") if runtime.GOOS == "linux" { commonCheckPoliciesCmd.Flags().BoolVar(&cliParams.windowsModel, "windows-model", false, "Evaluate policies using the Windows model") diff --git a/cmd/security-agent/subcommands/runtime/deprecated_commands.go b/cmd/security-agent/subcommands/runtime/deprecated_commands.go index 888ac84bd4d9c..bd695ef7d0449 100644 --- a/cmd/security-agent/subcommands/runtime/deprecated_commands.go +++ b/cmd/security-agent/subcommands/runtime/deprecated_commands.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -42,7 +42,7 @@ func checkPoliciesCommands(globalParams *command.GlobalParams) []*cobra.Command Deprecated: "please use `security-agent runtime policy check` instead", } - checkPoliciesCmd.Flags().StringVar(&cliParams.dir, "policies-dir", pkgconfig.DefaultRuntimePoliciesDir, "Path to policies directory") + checkPoliciesCmd.Flags().StringVar(&cliParams.dir, "policies-dir", pkgconfigsetup.DefaultRuntimePoliciesDir, "Path to policies directory") return []*cobra.Command{checkPoliciesCmd} } diff --git a/cmd/security-agent/subcommands/runtime/security_profile.go b/cmd/security-agent/subcommands/runtime/security_profile.go index 27d65dad73854..e02a85c37cca7 100644 --- a/cmd/security-agent/subcommands/runtime/security_profile.go +++ b/cmd/security-agent/subcommands/runtime/security_profile.go @@ -41,14 +41,14 @@ func securityProfileCommands(globalParams *command.GlobalParams) []*cobra.Comman Short: "security profile commands", } - securityProfileCmd.AddCommand(securityProfileShowCommands(globalParams)...) + securityProfileCmd.AddCommand(showSecurityProfileCommands(globalParams)...) securityProfileCmd.AddCommand(listSecurityProfileCommands(globalParams)...) 
securityProfileCmd.AddCommand(saveSecurityProfileCommands(globalParams)...) return []*cobra.Command{securityProfileCmd} } -func securityProfileShowCommands(globalParams *command.GlobalParams) []*cobra.Command { +func showSecurityProfileCommands(globalParams *command.GlobalParams) []*cobra.Command { cliParams := &securityProfileCliParams{ GlobalParams: globalParams, } diff --git a/cmd/security-agent/subcommands/start/command.go b/cmd/security-agent/subcommands/start/command.go index c4cf04e4a0dc0..b8631b19a4d8b 100644 --- a/cmd/security-agent/subcommands/start/command.go +++ b/cmd/security-agent/subcommands/start/command.go @@ -55,10 +55,9 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl" "github.com/DataDog/datadog-agent/pkg/collector/python" pkgCompliance "github.com/DataDog/datadog-agent/pkg/compliance" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" commonsettings "github.com/DataDog/datadog-agent/pkg/config/settings" - "github.com/DataDog/datadog-agent/pkg/config/setup" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/agent" "github.com/DataDog/datadog-agent/pkg/security/utils" "github.com/DataDog/datadog-agent/pkg/status/health" @@ -97,7 +96,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { ConfigParams: config.NewSecurityAgentParams(params.ConfigFilePaths, config.WithFleetPoliciesDirPath(globalParams.FleetPoliciesDirPath)), SysprobeConfigParams: sysprobeconfigimpl.NewParams(sysprobeconfigimpl.WithSysProbeConfFilePath(globalParams.SysProbeConfFilePath), sysprobeconfigimpl.WithFleetPoliciesDirPath(globalParams.FleetPoliciesDirPath)), SecretParams: secrets.NewEnabledParams(), - LogParams: log.ForDaemon(command.LoggerName, "security_agent.log_file", setup.DefaultSecurityAgentLogFile), + LogParams: log.ForDaemon(command.LoggerName, "security_agent.log_file", pkgconfigsetup.DefaultSecurityAgentLogFile), }), core.Bundle(), dogstatsd.ClientBundle, @@ -123,7 +122,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { return startstop.NewSerialStopper() }), fx.Provide(func(config config.Component, statsd statsd.Component) (ddgostatsd.ClientInterface, error) { - return statsd.CreateForHostPort(setup.GetBindHost(config), config.GetInt("dogstatsd_port")) + return statsd.CreateForHostPort(pkgconfigsetup.GetBindHost(config), config.GetInt("dogstatsd_port")) }), fx.Provide(func(stopper startstop.Stopper, log log.Component, config config.Component, statsdClient ddgostatsd.ClientInterface, wmeta workloadmeta.Component) (status.InformationProvider, *agent.RuntimeSecurityAgent, error) { hostnameDetected, err := utils.GetHostnameWithContextAndFallback(context.TODO()) @@ -284,7 +283,7 @@ func RunAgent(log log.Component, config config.Component, telemetry telemetry.Co // Setup expvar server port := config.GetString("security_agent.expvar_port") - pkgconfig.Datadog().Set("expvar_port", port, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("expvar_port", port, model.SourceAgentRuntime) if config.GetBool("telemetry.enabled") { http.Handle("/telemetry", telemetry.Handler()) } diff --git a/cmd/serverless-init/cloudservice/cloudrun.go b/cmd/serverless-init/cloudservice/cloudrun.go index d664fb2f7cc93..120f3725367b8 100644 --- a/cmd/serverless-init/cloudservice/cloudrun.go +++ b/cmd/serverless-init/cloudservice/cloudrun.go @@ -6,21 +6,28 @@ package cloudservice import ( + "fmt" + 
"github.com/DataDog/datadog-agent/pkg/util/log" "os" "github.com/DataDog/datadog-agent/cmd/serverless-init/cloudservice/helper" ) const ( - revisionNameEnvVar = "K_REVISION" //nolint:revive // TODO(SERV) Fix revive linter - ServiceNameEnvVar = "K_SERVICE" + revisionNameEnvVar = "K_REVISION" + ServiceNameEnvVar = "K_SERVICE" // ServiceNameEnvVar is also used in the trace package + configurationNameEnvVar = "K_CONFIGURATION" + functionTypeEnvVar = "FUNCTION_SIGNATURE_TYPE" + functionTargetEnvVar = "FUNCTION_TARGET" // exists as a cloudrunfunction env var for all runtimes except Go ) var metadataHelperFunc = helper.GetMetaData // CloudRun has helper functions for getting Google Cloud Run data -type CloudRun struct{} +type CloudRun struct { + cloudRunFunctionMode bool +} // GetTags returns a map of gcp-related tags. func (c *CloudRun) GetTags() map[string]string { @@ -28,6 +35,7 @@ func (c *CloudRun) GetTags() map[string]string { revisionName := os.Getenv(revisionNameEnvVar) serviceName := os.Getenv(ServiceNameEnvVar) + configName := os.Getenv(configurationNameEnvVar) if revisionName != "" { tags["revision_name"] = revisionName @@ -37,21 +45,48 @@ func (c *CloudRun) GetTags() map[string]string { tags["service_name"] = serviceName } + if configName != "" { + tags["configuration_name"] = configName + } + + if c.cloudRunFunctionMode { + tags = getFunctionTags(tags) + } tags["origin"] = c.GetOrigin() tags["_dd.origin"] = c.GetOrigin() return tags } +func getFunctionTags(tags map[string]string) map[string]string { + functionTarget := os.Getenv(functionTargetEnvVar) + functionSignatureType := os.Getenv(functionTypeEnvVar) + + if functionTarget != "" { + tags["function_target"] = functionTarget + } + + if functionSignatureType != "" { + tags["function_signature_type"] = functionSignatureType + } + return tags +} + // GetOrigin returns the `origin` attribute type for the given // cloud service. func (c *CloudRun) GetOrigin() string { + if c.cloudRunFunctionMode { + return "cloudfunctions" + } return "cloudrun" } // GetPrefix returns the prefix that we're prefixing all // metrics with. 
func (c *CloudRun) GetPrefix() string { + if c.cloudRunFunctionMode { + return "gcp.cloudfunctions" + } return "gcp.run" } @@ -64,3 +99,9 @@ func isCloudRunService() bool { _, exists := os.LookupEnv(ServiceNameEnvVar) return exists } + +func isCloudRunFunction() bool { + _, cloudRunFunctionMode := os.LookupEnv(functionTargetEnvVar) + log.Debug(fmt.Sprintf("cloud function mode SET TO: %t", cloudRunFunctionMode)) + return cloudRunFunctionMode +} diff --git a/cmd/serverless-init/cloudservice/cloudrun_test.go b/cmd/serverless-init/cloudservice/cloudrun_test.go index 4832b30f48478..41babaa632a1c 100644 --- a/cmd/serverless-init/cloudservice/cloudrun_test.go +++ b/cmd/serverless-init/cloudservice/cloudrun_test.go @@ -78,3 +78,45 @@ func TestGetCloudRunTagsWithEnvironmentVariables(t *testing.T) { "_dd.origin": "cloudrun", }, tags) } + +func TestGetCloudRunFunctionTagsWithEnvironmentVariables(t *testing.T) { + service := &CloudRun{cloudRunFunctionMode: true} + + metadataHelperFunc = func(*helper.GCPConfig) *helper.GCPMetadata { + return &helper.GCPMetadata{ + ContainerID: &helper.Info{ + TagName: "container_id", + Value: "test_container", + }, + Region: &helper.Info{ + TagName: "region", + Value: "test_region", + }, + ProjectID: &helper.Info{ + TagName: "project_id", + Value: "test_project", + }, + } + } + + t.Setenv("K_SERVICE", "test_service") + t.Setenv("K_REVISION", "test_revision") + t.Setenv("K_CONFIGURATION", "test_config") + t.Setenv("FUNCTION_SIGNATURE_TYPE", "test_signature") + t.Setenv("FUNCTION_TARGET", "test_target") + + tags := service.GetTags() + + assert.Equal(t, map[string]string{ + "container_id": "test_container", + "region": "test_region", + "origin": "cloudfunctions", + "project_id": "test_project", + "service_name": "test_service", + "revision_name": "test_revision", + "configuration_name": "test_config", + "_dd.origin": "cloudfunctions", + "function_target": "test_target", + "function_signature_type": "test_signature", + }, tags) +} diff --git a/cmd/serverless-init/cloudservice/service.go b/cmd/serverless-init/cloudservice/service.go index 0449d28d51533..54c9a3576a35a 100644 --- a/cmd/serverless-init/cloudservice/service.go +++ b/cmd/serverless-init/cloudservice/service.go @@ -51,6 +51,9 @@ func (l *LocalService) Init() error { //nolint:revive // TODO(SERV) Fix revive linter func GetCloudServiceType() CloudService { if isCloudRunService() { + if isCloudRunFunction() { + return &CloudRun{cloudRunFunctionMode: true} + } return &CloudRun{} } diff --git a/cmd/serverless-init/main.go b/cmd/serverless-init/main.go index 82493f7f9b5a6..0db7244b7774b 100644 --- a/cmd/serverless-init/main.go +++ b/cmd/serverless-init/main.go @@ -41,8 +41,8 @@ import ( "github.com/DataDog/datadog-agent/cmd/serverless-init/metric" serverlessInitTag "github.com/DataDog/datadog-agent/cmd/serverless-init/tag" logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/serverless/metrics" "github.com/DataDog/datadog-agent/pkg/serverless/otlp" @@ -111,7 +111,7 @@ func setup(_ mode.Conf, tagger tagger.Component) (cloudservice.CloudService, *se tracelog.SetLogger(corelogger{}) // load proxy settings - pkgconfig.LoadProxyFromEnv(pkgconfig.Datadog()) + pkgconfigsetup.LoadProxyFromEnv(pkgconfigsetup.Datadog()) cloudService := 
cloudservice.GetCloudServiceType() @@ -123,7 +123,7 @@ func setup(_ mode.Conf, tagger tagger.Component) (cloudservice.CloudService, *se tags := serverlessInitTag.GetBaseTagsMapWithMetadata( serverlessTag.MergeWithOverwrite( - serverlessTag.ArrayToMap(configUtils.GetConfiguredTags(pkgconfig.Datadog(), false)), + serverlessTag.ArrayToMap(configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), false)), cloudService.GetTags()), modeConf.TagVersionMode) @@ -134,7 +134,7 @@ func setup(_ mode.Conf, tagger tagger.Component) (cloudservice.CloudService, *se // The datadog-agent requires Load to be called or it could // panic down the line. - _, err := pkgconfig.LoadWithoutSecret() + _, err := pkgconfigsetup.LoadWithoutSecret(pkgconfigsetup.Datadog(), nil) if err != nil { log.Debugf("Error loading config: %v\n", err) } @@ -151,7 +151,7 @@ func setup(_ mode.Conf, tagger tagger.Component) (cloudservice.CloudService, *se return cloudService, agentLogConfig, traceAgent, metricAgent, logsAgent } func setupTraceAgent(tags map[string]string) trace.ServerlessTraceAgent { - traceAgent := trace.StartServerlessTraceAgent(pkgconfig.Datadog().GetBool("apm_config.enabled"), &trace.LoadConfig{Path: datadogConfigPath}, nil, random.Random.Uint64()) + traceAgent := trace.StartServerlessTraceAgent(pkgconfigsetup.Datadog().GetBool("apm_config.enabled"), &trace.LoadConfig{Path: datadogConfigPath}, nil, random.Random.Uint64()) traceAgent.SetTags(tags) go func() { for range time.Tick(3 * time.Second) { @@ -162,8 +162,8 @@ func setupTraceAgent(tags map[string]string) trace.ServerlessTraceAgent { } func setupMetricAgent(tags map[string]string) *metrics.ServerlessMetricAgent { - pkgconfig.Datadog().Set("use_v2_api.series", false, model.SourceAgentRuntime) - pkgconfig.Datadog().Set("dogstatsd_socket", "", model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("use_v2_api.series", false, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("dogstatsd_socket", "", model.SourceAgentRuntime) metricAgent := &metrics.ServerlessMetricAgent{ SketchesBucketOffset: time.Second * 0, diff --git a/cmd/serverless-init/tag/tag_test.go b/cmd/serverless-init/tag/tag_test.go index 007604aa88002..7548b5dff151d 100644 --- a/cmd/serverless-init/tag/tag_test.go +++ b/cmd/serverless-init/tag/tag_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" serverlessTag "github.com/DataDog/datadog-agent/pkg/serverless/tags" ) @@ -97,7 +97,7 @@ func TestDdTags(t *testing.T) { overwritingTags := map[string]string{ "originalKey": "overWrittenValue", } - mergedTags := serverlessTag.MergeWithOverwrite(serverlessTag.ArrayToMap(configUtils.GetConfiguredTags(config.Datadog(), false)), overwritingTags) + mergedTags := serverlessTag.MergeWithOverwrite(serverlessTag.ArrayToMap(configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), false)), overwritingTags) assert.Equal(t, "overWrittenValue", mergedTags["originalKey"]) assert.Equal(t, "value2", mergedTags["key2"]) assert.Equal(t, "value3", mergedTags["key3"]) diff --git a/cmd/serverless/dependencies_linux_amd64.txt b/cmd/serverless/dependencies_linux_amd64.txt index 0817058707727..562c2c24ce68f 100644 --- a/cmd/serverless/dependencies_linux_amd64.txt +++ b/cmd/serverless/dependencies_linux_amd64.txt @@ -139,12 +139,14 @@ github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types 
github.com/DataDog/datadog-agent/pkg/collector/check/defaults github.com/DataDog/datadog-agent/pkg/collector/check/id github.com/DataDog/datadog-agent/pkg/collector/check/stats -github.com/DataDog/datadog-agent/pkg/config github.com/DataDog/datadog-agent/pkg/config/env github.com/DataDog/datadog-agent/pkg/config/model +github.com/DataDog/datadog-agent/pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/remote/data github.com/DataDog/datadog-agent/pkg/config/setup github.com/DataDog/datadog-agent/pkg/config/setup/constants +github.com/DataDog/datadog-agent/pkg/config/structure +github.com/DataDog/datadog-agent/pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis github.com/DataDog/datadog-agent/pkg/errors @@ -438,6 +440,7 @@ github.com/go-logr/logr github.com/go-logr/logr/funcr github.com/go-logr/stdr github.com/go-viper/mapstructure/v2 +github.com/go-viper/mapstructure/v2/internal/errors github.com/godbus/dbus/v5 github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/jsonpb @@ -580,16 +583,16 @@ go.opentelemetry.io/collector/connector go.opentelemetry.io/collector/consumer go.opentelemetry.io/collector/consumer/consumererror go.opentelemetry.io/collector/exporter +go.opentelemetry.io/collector/exporter/debugexporter +go.opentelemetry.io/collector/exporter/debugexporter/internal/metadata +go.opentelemetry.io/collector/exporter/debugexporter/internal/normal go.opentelemetry.io/collector/exporter/exporterbatcher go.opentelemetry.io/collector/exporter/exporterhelper go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata go.opentelemetry.io/collector/exporter/exporterqueue -go.opentelemetry.io/collector/exporter/internal/common go.opentelemetry.io/collector/exporter/internal/experr go.opentelemetry.io/collector/exporter/internal/otlptext go.opentelemetry.io/collector/exporter/internal/queue -go.opentelemetry.io/collector/exporter/loggingexporter -go.opentelemetry.io/collector/exporter/loggingexporter/internal/metadata go.opentelemetry.io/collector/exporter/otlpexporter go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata go.opentelemetry.io/collector/extension @@ -663,6 +666,7 @@ go.opentelemetry.io/contrib/config go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil go.opentelemetry.io/contrib/propagators/b3 @@ -725,7 +729,6 @@ go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 -go.opentelemetry.io/otel/semconv/v1.24.0 go.opentelemetry.io/otel/semconv/v1.25.0 go.opentelemetry.io/otel/semconv/v1.26.0 go.opentelemetry.io/otel/trace diff --git a/cmd/serverless/dependencies_linux_arm64.txt b/cmd/serverless/dependencies_linux_arm64.txt index 31d6006a051e9..8d28beb46136c 100644 --- a/cmd/serverless/dependencies_linux_arm64.txt +++ b/cmd/serverless/dependencies_linux_arm64.txt @@ -139,12 +139,14 @@ github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types github.com/DataDog/datadog-agent/pkg/collector/check/defaults 
github.com/DataDog/datadog-agent/pkg/collector/check/id github.com/DataDog/datadog-agent/pkg/collector/check/stats -github.com/DataDog/datadog-agent/pkg/config github.com/DataDog/datadog-agent/pkg/config/env github.com/DataDog/datadog-agent/pkg/config/model +github.com/DataDog/datadog-agent/pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/remote/data github.com/DataDog/datadog-agent/pkg/config/setup github.com/DataDog/datadog-agent/pkg/config/setup/constants +github.com/DataDog/datadog-agent/pkg/config/structure +github.com/DataDog/datadog-agent/pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis github.com/DataDog/datadog-agent/pkg/errors @@ -438,6 +440,7 @@ github.com/go-logr/logr github.com/go-logr/logr/funcr github.com/go-logr/stdr github.com/go-viper/mapstructure/v2 +github.com/go-viper/mapstructure/v2/internal/errors github.com/godbus/dbus/v5 github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/jsonpb @@ -579,16 +582,16 @@ go.opentelemetry.io/collector/connector go.opentelemetry.io/collector/consumer go.opentelemetry.io/collector/consumer/consumererror go.opentelemetry.io/collector/exporter +go.opentelemetry.io/collector/exporter/debugexporter +go.opentelemetry.io/collector/exporter/debugexporter/internal/metadata +go.opentelemetry.io/collector/exporter/debugexporter/internal/normal go.opentelemetry.io/collector/exporter/exporterbatcher go.opentelemetry.io/collector/exporter/exporterhelper go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata go.opentelemetry.io/collector/exporter/exporterqueue -go.opentelemetry.io/collector/exporter/internal/common go.opentelemetry.io/collector/exporter/internal/experr go.opentelemetry.io/collector/exporter/internal/otlptext go.opentelemetry.io/collector/exporter/internal/queue -go.opentelemetry.io/collector/exporter/loggingexporter -go.opentelemetry.io/collector/exporter/loggingexporter/internal/metadata go.opentelemetry.io/collector/exporter/otlpexporter go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata go.opentelemetry.io/collector/extension @@ -662,6 +665,7 @@ go.opentelemetry.io/contrib/config go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil go.opentelemetry.io/contrib/propagators/b3 @@ -724,7 +728,6 @@ go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 -go.opentelemetry.io/otel/semconv/v1.24.0 go.opentelemetry.io/otel/semconv/v1.25.0 go.opentelemetry.io/otel/semconv/v1.26.0 go.opentelemetry.io/otel/trace diff --git a/cmd/serverless/main.go b/cmd/serverless/main.go index 122368a28d675..68865f96aa14e 100644 --- a/cmd/serverless/main.go +++ b/cmd/serverless/main.go @@ -18,8 +18,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" taggernoop "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl" logConfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" + 
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless" @@ -42,6 +42,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/log" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) // AWS Lambda is writing the Lambda function files in /var/task, we want the @@ -49,12 +50,12 @@ import ( var datadogConfigPath = "/var/task/datadog.yaml" const ( - loggerName config.LoggerName = "DD_EXTENSION" - logLevelEnvVar = "DD_LOG_LEVEL" - flushStrategyEnvVar = "DD_SERVERLESS_FLUSH_STRATEGY" - logsLogsTypeSubscribed = "DD_LOGS_CONFIG_LAMBDA_LOGS_TYPE" - extensionRegistrationRoute = "/2020-01-01/extension/register" - extensionRegistrationTimeout = 5 * time.Second + loggerName pkglogsetup.LoggerName = "DD_EXTENSION" + logLevelEnvVar = "DD_LOG_LEVEL" + flushStrategyEnvVar = "DD_SERVERLESS_FLUSH_STRATEGY" + logsLogsTypeSubscribed = "DD_LOGS_CONFIG_LAMBDA_LOGS_TYPE" + extensionRegistrationRoute = "/2020-01-01/extension/register" + extensionRegistrationTimeout = 5 * time.Second // httpServerAddr will be the default addr used to run the HTTP server listening // to calls from the client libraries and to logs from the AWS environment. @@ -144,7 +145,7 @@ func runAgent(tagger tagger.Component) { setupProxy(appsecProxyProcessor, ta, serverlessDaemon) - serverlessDaemon.ComputeGlobalTags(configUtils.GetConfiguredTags(config.Datadog(), true)) + serverlessDaemon.ComputeGlobalTags(configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), true)) stopCh := startInvocationLoop(serverlessDaemon, serverlessID) @@ -210,7 +211,7 @@ func startMetricAgent(serverlessDaemon *daemon.Daemon, logChannel chan *logConfi } metricAgent.Start(daemon.FlushTimeout, &metrics.MetricConfig{}, &metrics.MetricDogStatsD{}) serverlessDaemon.SetStatsdServer(metricAgent) - serverlessDaemon.SetupLogCollectionHandler(logsAPICollectionRoute, logChannel, config.Datadog().GetBool("serverless.logs_enabled"), config.Datadog().GetBool("enhanced_metrics"), lambdaInitMetricChan) + serverlessDaemon.SetupLogCollectionHandler(logsAPICollectionRoute, logChannel, pkgconfigsetup.Datadog().GetBool("serverless.logs_enabled"), pkgconfigsetup.Datadog().GetBool("enhanced_metrics"), lambdaInitMetricChan) return metricAgent } @@ -256,10 +257,10 @@ func startCommunicationServer(startTime time.Time) *daemon.Daemon { func setupLambdaAgentOverrides() { flavor.SetFlavor(flavor.ServerlessAgent) - config.Datadog().Set("use_v2_api.series", false, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("use_v2_api.series", false, model.SourceAgentRuntime) // TODO(duncanista): figure out how this is used and if it's necessary for Serverless - config.Datadog().Set("dogstatsd_socket", "", model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("dogstatsd_socket", "", model.SourceAgentRuntime) // Disable remote configuration for now as it just spams the debug logs // and provides no value. 
@@ -335,7 +336,7 @@ func startOtlpAgent(wg *sync.WaitGroup, metricAgent *metrics.ServerlessMetricAge func startTraceAgent(wg *sync.WaitGroup, lambdaSpanChan chan *pb.Span, coldStartSpanId uint64, serverlessDaemon *daemon.Daemon) { defer wg.Done() - traceAgent := trace.StartServerlessTraceAgent(config.Datadog().GetBool("apm_config.enabled"), &trace.LoadConfig{Path: datadogConfigPath}, lambdaSpanChan, coldStartSpanId) + traceAgent := trace.StartServerlessTraceAgent(pkgconfigsetup.Datadog().GetBool("apm_config.enabled"), &trace.LoadConfig{Path: datadogConfigPath}, lambdaSpanChan, coldStartSpanId) serverlessDaemon.SetTraceAgent(traceAgent) } @@ -372,9 +373,9 @@ func setupApiKey() bool { } func loadConfig() { - config.Datadog().SetConfigFile(datadogConfigPath) + pkgconfigsetup.Datadog().SetConfigFile(datadogConfigPath) // Load datadog.yaml file into the config, so that metricAgent can pick these configurations - if _, err := config.LoadWithoutSecret(); err != nil { + if _, err := pkgconfigsetup.LoadWithoutSecret(pkgconfigsetup.Datadog(), nil); err != nil { log.Errorf("Error happened when loading configuration from datadog.yaml for metric agent: %s", err) } } @@ -401,7 +402,7 @@ func setupLogger() { } // init the logger configuring it to not log in a file (the first empty string) - if err := config.SetupLogger( + if err := pkglogsetup.SetupLogger( loggerName, logLevel, "", // logFile -> by setting this to an empty string, we don't write the logs to any file @@ -409,6 +410,7 @@ func setupLogger() { false, // syslog_rfc true, // log_to_console false, // log_format_json + pkgconfigsetup.Datadog(), ); err != nil { log.Errorf("Unable to setup logger: %s", err) } diff --git a/cmd/system-probe/api/client/client.go b/cmd/system-probe/api/client/client.go index 2d7ed13f17af4..81f8efd38bebb 100644 --- a/cmd/system-probe/api/client/client.go +++ b/cmd/system-probe/api/client/client.go @@ -7,25 +7,10 @@ package client import ( - "context" - "net" "net/http" - "time" ) // Get returns a http client configured to talk to the system-probe func Get(socketPath string) *http.Client { - return &http.Client{ - Timeout: 10 * time.Second, - Transport: &http.Transport{ - MaxIdleConns: 2, - IdleConnTimeout: 30 * time.Second, - DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial(netType, socketPath) - }, - TLSHandshakeTimeout: 1 * time.Second, - ResponseHeaderTimeout: 5 * time.Second, - ExpectContinueTimeout: 50 * time.Millisecond, - }, - } + return newSystemProbeClient(socketPath) } diff --git a/cmd/system-probe/api/client/client_linux.go b/cmd/system-probe/api/client/client_linux.go index 92a558d19e4a2..b644838e80544 100644 --- a/cmd/system-probe/api/client/client_linux.go +++ b/cmd/system-probe/api/client/client_linux.go @@ -7,6 +7,31 @@ package client +import ( + "context" + "net" + "net/http" + "time" +) + const ( netType = "unix" ) + +// newSystemProbeClient returns a http client configured to talk to the system-probe +// Linux is unable to import pkg/process/net due to size restrictions. 
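+// The returned client dials the system-probe unix socket at socketPath and applies short, conservative timeouts.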
+func newSystemProbeClient(socketPath string) *http.Client { + return &http.Client{ + Timeout: 10 * time.Second, + Transport: &http.Transport{ + MaxIdleConns: 2, + IdleConnTimeout: 30 * time.Second, + DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial(netType, socketPath) + }, + TLSHandshakeTimeout: 1 * time.Second, + ResponseHeaderTimeout: 5 * time.Second, + ExpectContinueTimeout: 50 * time.Millisecond, + }, + } +} diff --git a/cmd/system-probe/api/client/client_others.go b/cmd/system-probe/api/client/client_others.go index 48ac5dcd6614a..1204bb37e7d1a 100644 --- a/cmd/system-probe/api/client/client_others.go +++ b/cmd/system-probe/api/client/client_others.go @@ -7,6 +7,30 @@ package client +import ( + "context" + "net" + "net/http" + "time" +) + const ( netType = "tcp" ) + +// newSystemProbeClient returns a http client configured to talk to the system-probe +func newSystemProbeClient(socketPath string) *http.Client { + return &http.Client{ + Timeout: 10 * time.Second, + Transport: &http.Transport{ + MaxIdleConns: 2, + IdleConnTimeout: 30 * time.Second, + DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial(netType, socketPath) + }, + TLSHandshakeTimeout: 1 * time.Second, + ResponseHeaderTimeout: 5 * time.Second, + ExpectContinueTimeout: 50 * time.Millisecond, + }, + } +} diff --git a/cmd/system-probe/api/client/client_windows.go b/cmd/system-probe/api/client/client_windows.go index d8217bb143e76..d4649c90bd5c6 100644 --- a/cmd/system-probe/api/client/client_windows.go +++ b/cmd/system-probe/api/client/client_windows.go @@ -7,6 +7,15 @@ package client -const ( - netType = "tcp" +import ( + "net/http" + + processNet "github.com/DataDog/datadog-agent/pkg/process/net" ) + +// newSystemProbeClient returns a http client configured to talk to the system-probe +// This is a simple wrapper around process_net.NewSystemProbeHttpClient because +// Linux is unable to import pkg/process/net due to size restrictions. +func newSystemProbeClient(_ string) *http.Client { + return processNet.NewSystemProbeClient() +} diff --git a/cmd/system-probe/api/server.go b/cmd/system-probe/api/server.go index 8ab6909d3db54..a2d20995b9804 100644 --- a/cmd/system-probe/api/server.go +++ b/cmd/system-probe/api/server.go @@ -27,9 +27,9 @@ import ( // StartServer starts the HTTP and gRPC servers for the system-probe, which registers endpoints from all enabled modules. func StartServer(cfg *sysconfigtypes.Config, telemetry telemetry.Component, wmeta workloadmeta.Component, settings settings.Component) error { - conn, err := net.NewListener(cfg.SocketAddress) + conn, err := net.NewSystemProbeListener(cfg.SocketAddress) if err != nil { - return fmt.Errorf("error creating IPC socket: %s", err) + return err } mux := gorilla.NewRouter() diff --git a/cmd/system-probe/config/adjust.go b/cmd/system-probe/config/adjust.go index f83ee25eb80e2..37c0fb29cfeb9 100644 --- a/cmd/system-probe/config/adjust.go +++ b/cmd/system-probe/config/adjust.go @@ -10,7 +10,6 @@ import ( "fmt" "sync" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -18,7 +17,7 @@ import ( var adjustMtx sync.Mutex // Adjust makes changes to the raw config based on deprecations and inferences. 
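+// A package-level mutex (adjustMtx) serializes calls so concurrent adjustments do not race.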
-func Adjust(cfg config.Config) { +func Adjust(cfg model.Config) { adjustMtx.Lock() defer adjustMtx.Unlock() if cfg.GetBool(spNS("adjusted")) { @@ -59,7 +58,7 @@ func Adjust(cfg config.Config) { // validateString validates the string configuration value at `key` using a custom provided function `valFn`. // If `key` is not set or `valFn` returns an error, the `defaultVal` is used instead. -func validateString(cfg config.Config, key string, defaultVal string, valFn func(string) error) { +func validateString(cfg model.Config, key string, defaultVal string, valFn func(string) error) { if cfg.IsSet(key) { if err := valFn(cfg.GetString(key)); err != nil { log.Errorf("error validating `%s`: %s, using default value of `%s`", key, err, defaultVal) @@ -72,7 +71,7 @@ func validateString(cfg config.Config, key string, defaultVal string, valFn func // validateInt validates the int configuration value at `key` using a custom provided function `valFn`. // If `key` is not set or `valFn` returns an error, the `defaultVal` is used instead. -func validateInt(cfg config.Config, key string, defaultVal int, valFn func(int) error) { +func validateInt(cfg model.Config, key string, defaultVal int, valFn func(int) error) { if cfg.IsSet(key) { if err := valFn(cfg.GetInt(key)); err != nil { log.Errorf("error validating `%s`: %s, using default value of `%d`", key, err, defaultVal) @@ -85,7 +84,7 @@ func validateInt(cfg config.Config, key string, defaultVal int, valFn func(int) // validateInt64 validates the int64 configuration value at `key` using a custom provided function `valFn`. // If `key` is not set or `valFn` returns an error, the `defaultVal` is used instead. -func validateInt64(cfg config.Config, key string, defaultVal int64, valFn func(int64) error) { +func validateInt64(cfg model.Config, key string, defaultVal int64, valFn func(int64) error) { if cfg.IsSet(key) { if err := valFn(cfg.GetInt64(key)); err != nil { log.Errorf("error validating `%s`: %s. using default value of `%d`", key, err, defaultVal) @@ -97,7 +96,7 @@ func validateInt64(cfg config.Config, key string, defaultVal int64, valFn func(i } // applyDefault sets configuration `key` to `defaultVal` only if not previously set. -func applyDefault(cfg config.Config, key string, defaultVal interface{}) { +func applyDefault(cfg model.Config, key string, defaultVal interface{}) { if !cfg.IsSet(key) { cfg.Set(key, defaultVal, model.SourceAgentRuntime) } @@ -105,47 +104,47 @@ func applyDefault(cfg config.Config, key string, defaultVal interface{}) { // deprecateBool logs a deprecation message if `oldkey` is used. // It sets `newkey` to the value obtained from `getFn`, but only if `oldkey` is set and `newkey` is not set. -func deprecateBool(cfg config.Config, oldkey string, newkey string) { - deprecateCustom(cfg, oldkey, newkey, func(cfg config.Config) interface{} { +func deprecateBool(cfg model.Config, oldkey string, newkey string) { + deprecateCustom(cfg, oldkey, newkey, func(cfg model.Config) interface{} { return cfg.GetBool(oldkey) }) } // deprecateInt64 logs a deprecation message if `oldkey` is used. // It sets `newkey` to the value obtained from `getFn`, but only if `oldkey` is set and `newkey` is not set. 
-func deprecateInt64(cfg config.Config, oldkey string, newkey string) { - deprecateCustom(cfg, oldkey, newkey, func(cfg config.Config) interface{} { +func deprecateInt64(cfg model.Config, oldkey string, newkey string) { + deprecateCustom(cfg, oldkey, newkey, func(cfg model.Config) interface{} { return cfg.GetInt64(oldkey) }) } // deprecateGeneric logs a deprecation message if `oldkey` is used. // It sets `newkey` to the value obtained from `getFn`, but only if `oldkey` is set and `newkey` is not set. -func deprecateGeneric(cfg config.Config, oldkey string, newkey string) { - deprecateCustom(cfg, oldkey, newkey, func(cfg config.Config) interface{} { +func deprecateGeneric(cfg model.Config, oldkey string, newkey string) { + deprecateCustom(cfg, oldkey, newkey, func(cfg model.Config) interface{} { return cfg.Get(oldkey) }) } // deprecateInt logs a deprecation message if `oldkey` is used. // It sets `newkey` to the value obtained from `getFn`, but only if `oldkey` is set and `newkey` is not set. -func deprecateInt(cfg config.Config, oldkey string, newkey string) { - deprecateCustom(cfg, oldkey, newkey, func(cfg config.Config) interface{} { +func deprecateInt(cfg model.Config, oldkey string, newkey string) { + deprecateCustom(cfg, oldkey, newkey, func(cfg model.Config) interface{} { return cfg.GetInt(oldkey) }) } // deprecateString logs a deprecation message if `oldkey` is used. // It sets `newkey` to the value obtained from `getFn`, but only if `oldkey` is set and `newkey` is not set. -func deprecateString(cfg config.Config, oldkey string, newkey string) { - deprecateCustom(cfg, oldkey, newkey, func(cfg config.Config) interface{} { +func deprecateString(cfg model.Config, oldkey string, newkey string) { + deprecateCustom(cfg, oldkey, newkey, func(cfg model.Config) interface{} { return cfg.GetString(oldkey) }) } // deprecateCustom logs a deprecation message if `oldkey` is used. // It sets `newkey` to the value obtained from `getFn`, but only if `oldkey` is set and `newkey` is not set. -func deprecateCustom(cfg config.Config, oldkey string, newkey string, getFn func(config.Config) interface{}) { +func deprecateCustom(cfg model.Config, oldkey string, newkey string, getFn func(model.Config) interface{}) { if cfg.IsSet(oldkey) { log.Warn(deprecationMessage(oldkey, newkey)) if !cfg.IsSet(newkey) { @@ -160,7 +159,7 @@ func deprecationMessage(oldkey, newkey string) string { } // limitMaxInt logs a warning and sets `key` to `max` if the value exceeds `max`. -func limitMaxInt(cfg config.Config, key string, max int) { +func limitMaxInt(cfg model.Config, key string, max int) { val := cfg.GetInt(key) if val > max { log.Warnf("configuration key `%s` was set to `%d`, using maximum value `%d` instead", key, val, max) @@ -169,7 +168,7 @@ func limitMaxInt(cfg config.Config, key string, max int) { } // limitMaxInt64 logs a warning and sets `key` to `max` if the value exceeds `max`. 
-func limitMaxInt64(cfg config.Config, key string, max int64) { +func limitMaxInt64(cfg model.Config, key string, max int64) { val := cfg.GetInt64(key) if val > max { log.Warnf("configuration key `%s` was set to `%d`, using maximum value `%d` instead", key, val, max) diff --git a/cmd/system-probe/config/adjust_npm.go b/cmd/system-probe/config/adjust_npm.go index ab0fc468bf553..e1be10ae08d79 100644 --- a/cmd/system-probe/config/adjust_npm.go +++ b/cmd/system-probe/config/adjust_npm.go @@ -11,7 +11,6 @@ import ( "math" "runtime" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -24,7 +23,7 @@ const ( defaultMaxTrackedConnections = 65536 ) -func adjustNetwork(cfg config.Config) { +func adjustNetwork(cfg model.Config) { ebpflessEnabled := cfg.GetBool(netNS("enable_ebpfless")) limitMaxInt(cfg, spNS("max_conns_per_message"), maxConnsMessageBatchSize) diff --git a/cmd/system-probe/config/adjust_security.go b/cmd/system-probe/config/adjust_security.go index 46ac8f70593f8..f5b3d27ddf80c 100644 --- a/cmd/system-probe/config/adjust_security.go +++ b/cmd/system-probe/config/adjust_security.go @@ -9,12 +9,11 @@ import ( "runtime" "time" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" ) -func adjustSecurity(cfg config.Config) { - deprecateCustom(cfg, secNS("activity_dump.cgroup_dump_timeout"), secNS("activity_dump.dump_duration"), func(cfg config.Config) interface{} { +func adjustSecurity(cfg model.Config) { + deprecateCustom(cfg, secNS("activity_dump.cgroup_dump_timeout"), secNS("activity_dump.dump_duration"), func(cfg model.Config) interface{} { // convert old minutes int value to time.Duration return time.Duration(cfg.GetInt(secNS("activity_dump.cgroup_dump_timeout"))) * time.Minute }) @@ -23,7 +22,7 @@ func adjustSecurity(cfg config.Config) { cfg, secNS("runtime_security_config.security_profile.anomaly_detection.auto_suppression.enabled"), secNS("runtime_security_config.security_profile.auto_suppression.enabled"), - func(cfg config.Config) interface{} { + func(cfg model.Config) interface{} { // convert old auto suppression parameter to the new one return cfg.GetBool(secNS("runtime_security_config.security_profile.anomaly_detection.auto_suppression.enabled")) }, diff --git a/cmd/system-probe/config/adjust_usm.go b/cmd/system-probe/config/adjust_usm.go index 05946c7fb72ed..d7e164a9020f1 100644 --- a/cmd/system-probe/config/adjust_usm.go +++ b/cmd/system-probe/config/adjust_usm.go @@ -9,14 +9,14 @@ import ( "fmt" "runtime" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) const ( maxHTTPFrag = 512 // matches hard limit currently imposed in NPM driver ) -func adjustUSM(cfg config.Config) { +func adjustUSM(cfg model.Config) { if cfg.GetBool(smNS("enabled")) { applyDefault(cfg, netNS("enable_http_monitoring"), true) applyDefault(cfg, netNS("enable_https_monitoring"), true) diff --git a/cmd/system-probe/config/config.go b/cmd/system-probe/config/config.go index ff21f22169103..e6f0575248e2a 100644 --- a/cmd/system-probe/config/config.go +++ b/cmd/system-probe/config/config.go @@ -18,8 +18,8 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" "github.com/DataDog/datadog-agent/comp/core/secrets" - aconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -51,22 +51,22 @@ func New(configPath string, fleetPoliciesDirPath string) (*types.Config, error) } func newSysprobeConfig(configPath string, fleetPoliciesDirPath string) (*types.Config, error) { - aconfig.SystemProbe().SetConfigName("system-probe") + pkgconfigsetup.SystemProbe().SetConfigName("system-probe") // set the paths where a config file is expected if len(configPath) != 0 { // if the configuration file path was supplied on the command line, // add that first, so it's first in line - aconfig.SystemProbe().AddConfigPath(configPath) + pkgconfigsetup.SystemProbe().AddConfigPath(configPath) // If they set a config file directly, let's try to honor that if strings.HasSuffix(configPath, ".yaml") { - aconfig.SystemProbe().SetConfigFile(configPath) + pkgconfigsetup.SystemProbe().SetConfigFile(configPath) } } else { // only add default if a custom configPath was not supplied - aconfig.SystemProbe().AddConfigPath(defaultConfigDir) + pkgconfigsetup.SystemProbe().AddConfigPath(defaultConfigDir) } // load the configuration - err := aconfig.LoadCustom(aconfig.SystemProbe(), aconfig.Datadog().GetEnvVars()) + err := pkgconfigsetup.LoadCustom(pkgconfigsetup.SystemProbe(), pkgconfigsetup.Datadog().GetEnvVars()) if err != nil { if errors.Is(err, fs.ErrPermission) { // special-case permission-denied with a clearer error message @@ -84,10 +84,10 @@ func newSysprobeConfig(configPath string, fleetPoliciesDirPath string) (*types.C // Load the remote configuration if fleetPoliciesDirPath == "" { - fleetPoliciesDirPath = aconfig.SystemProbe().GetString("fleet_policies_dir") + fleetPoliciesDirPath = pkgconfigsetup.SystemProbe().GetString("fleet_policies_dir") } if fleetPoliciesDirPath != "" { - err := aconfig.SystemProbe().MergeFleetPolicy(path.Join(fleetPoliciesDirPath, "system-probe.yaml")) + err := pkgconfigsetup.SystemProbe().MergeFleetPolicy(path.Join(fleetPoliciesDirPath, "system-probe.yaml")) if err != nil { return nil, err } @@ -97,7 +97,7 @@ func newSysprobeConfig(configPath string, fleetPoliciesDirPath string) (*types.C } func load() (*types.Config, error) { - cfg := aconfig.SystemProbe() + cfg := pkgconfigsetup.SystemProbe() Adjust(cfg) c := &types.Config{ @@ -114,15 +114,16 @@ func load() (*types.Config, error) { HealthPort: cfg.GetInt(spNS("health_port")), TelemetryEnabled: cfg.GetBool(spNS("telemetry_enabled")), - StatsdHost: aconfig.GetBindHost(), + StatsdHost: pkgconfigsetup.GetBindHost(pkgconfigsetup.Datadog()), StatsdPort: cfg.GetInt("dogstatsd_port"), } npmEnabled := cfg.GetBool(netNS("enabled")) usmEnabled := cfg.GetBool(smNS("enabled")) ccmEnabled := cfg.GetBool(ccmNS("enabled")) + csmEnabled := cfg.GetBool(secNS("enabled")) - if npmEnabled || usmEnabled || ccmEnabled { + if npmEnabled || usmEnabled || ccmEnabled || csmEnabled { c.EnabledModules[NetworkTracerModule] = struct{}{} } if cfg.GetBool(spNS("enable_tcp_queue_length")) { @@ -181,12 +182,12 @@ func load() (*types.Config, error) { // SetupOptionalDatadogConfigWithDir loads the datadog.yaml config file from a given config directory but will not fail on a missing file func SetupOptionalDatadogConfigWithDir(configDir, configFile string) error { - aconfig.Datadog().AddConfigPath(configDir) + pkgconfigsetup.Datadog().AddConfigPath(configDir) if configFile != "" { - aconfig.Datadog().SetConfigFile(configFile) + pkgconfigsetup.Datadog().SetConfigFile(configFile) } // load the configuration - _, err := 
aconfig.LoadDatadogCustom(aconfig.Datadog(), "datadog.yaml", optional.NewNoneOption[secrets.Component](), aconfig.SystemProbe().GetEnvVars()) + _, err := pkgconfigsetup.LoadDatadogCustom(pkgconfigsetup.Datadog(), "datadog.yaml", optional.NewNoneOption[secrets.Component](), pkgconfigsetup.SystemProbe().GetEnvVars()) // If `!failOnMissingFile`, do not issue an error if we cannot find the default config file. var e viper.ConfigFileNotFoundError if err != nil && !errors.As(err, &e) { diff --git a/cmd/system-probe/config/config_linux_test.go b/cmd/system-probe/config/config_linux_test.go index aab12a5c52ed5..0998e2ce69b74 100644 --- a/cmd/system-probe/config/config_linux_test.go +++ b/cmd/system-probe/config/config_linux_test.go @@ -73,17 +73,25 @@ func TestEventStreamEnabledForSupportedKernelsLinux(t *testing.T) { func TestNPMEnabled(t *testing.T) { tests := []struct { - npm, usm, ccm bool - npmEnabled bool + npm, usm, ccm, csm bool + npmEnabled bool }{ - {false, false, false, false}, - {false, false, true, true}, - {false, true, false, true}, - {false, true, true, true}, - {true, false, false, true}, - {true, false, true, true}, - {true, true, false, true}, - {true, true, true, true}, + {false, false, false, false, false}, + {false, false, true, false, true}, + {false, true, false, false, true}, + {false, true, true, false, true}, + {true, false, false, false, true}, + {true, false, true, false, true}, + {true, true, false, false, true}, + {true, true, true, false, true}, + {false, false, false, true, true}, + {false, false, true, true, true}, + {false, true, false, true, true}, + {false, true, true, true, true}, + {true, false, false, true, true}, + {true, false, true, true, true}, + {true, true, false, true, true}, + {true, true, true, true, true}, } mock.NewSystemProbe(t) @@ -92,6 +100,7 @@ func TestNPMEnabled(t *testing.T) { t.Setenv("DD_SYSTEM_PROBE_NETWORK_ENABLED", strconv.FormatBool(te.npm)) t.Setenv("DD_SYSTEM_PROBE_SERVICE_MONITORING_ENABLED", strconv.FormatBool(te.usm)) t.Setenv("DD_CCM_NETWORK_CONFIG_ENABLED", strconv.FormatBool(te.ccm)) + t.Setenv("DD_RUNTIME_SECURITY_CONFIG_ENABLED", strconv.FormatBool(te.csm)) cfg, err := New("", "") require.NoError(t, err) assert.Equal(t, te.npmEnabled, cfg.ModuleIsEnabled(NetworkTracerModule), "unexpected network tracer module enablement: npm: %v, usm: %v, ccm: %v", te.npm, te.usm, te.ccm) diff --git a/cmd/system-probe/modules/dynamic_instrumentation.go b/cmd/system-probe/modules/dynamic_instrumentation.go index 7ff8d7d48ba6d..bd4272b8c8295 100644 --- a/cmd/system-probe/modules/dynamic_instrumentation.go +++ b/cmd/system-probe/modules/dynamic_instrumentation.go @@ -14,23 +14,26 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" "github.com/DataDog/datadog-agent/cmd/system-probe/config" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" - "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation" + dimod "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/module" "github.com/DataDog/datadog-agent/pkg/ebpf" ) -// DynamicInstrumentation is the dynamic instrumentation module factory +// DynamicInstrumentation is a system probe module which allows you to add instrumentation into +// running Go services without restarts. 
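+// If eBPF support is not implemented on the host, the factory reports module.ErrNotEnabled rather than a hard error.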
var DynamicInstrumentation = module.Factory{ Name: config.DynamicInstrumentationModule, ConfigNamespaces: []string{}, Fn: func(agentConfiguration *sysconfigtypes.Config, _ module.FactoryDependencies) (module.Module, error) { - config, err := dynamicinstrumentation.NewConfig(agentConfiguration) + config, err := dimod.NewConfig(agentConfiguration) if err != nil { return nil, fmt.Errorf("invalid dynamic instrumentation module configuration: %w", err) } - - m, err := dynamicinstrumentation.NewModule(config) - if errors.Is(err, ebpf.ErrNotImplemented) { - return nil, module.ErrNotEnabled + m, err := dimod.NewModule(config) + if err != nil { + if errors.Is(err, ebpf.ErrNotImplemented) { + return nil, module.ErrNotEnabled + } + return nil, err } return m, nil diff --git a/cmd/system-probe/modules/eventmonitor.go b/cmd/system-probe/modules/eventmonitor.go index d94555cddd939..d90b39b3ea1f1 100644 --- a/cmd/system-probe/modules/eventmonitor.go +++ b/cmd/system-probe/modules/eventmonitor.go @@ -41,7 +41,7 @@ func createEventMonitorModule(_ *sysconfigtypes.Config, deps module.FactoryDepen secmodule.DisableRuntimeSecurity(secconfig) } - evm, err := eventmonitor.NewEventMonitor(emconfig, secconfig, opts, deps.WMeta, deps.Telemetry) + evm, err := eventmonitor.NewEventMonitor(emconfig, secconfig, opts, deps.Telemetry) if err != nil { log.Errorf("error initializing event monitoring module: %v", err) return nil, module.ErrNotEnabled diff --git a/cmd/system-probe/modules/eventmonitor_linux.go b/cmd/system-probe/modules/eventmonitor_linux.go index ebea6228c580d..bf63a33276723 100644 --- a/cmd/system-probe/modules/eventmonitor_linux.go +++ b/cmd/system-probe/modules/eventmonitor_linux.go @@ -10,7 +10,7 @@ package modules import ( "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" "github.com/DataDog/datadog-agent/cmd/system-probe/config" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/eventmonitor" netconfig "github.com/DataDog/datadog-agent/pkg/network/config" usmconfig "github.com/DataDog/datadog-agent/pkg/network/usm/config" @@ -24,7 +24,7 @@ var EventMonitor = module.Factory{ ConfigNamespaces: eventMonitorModuleConfigNamespaces, Fn: createEventMonitorModule, NeedsEBPF: func() bool { - return !coreconfig.SystemProbe().GetBool("runtime_security_config.ebpfless.enabled") + return !pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.ebpfless.enabled") }, } diff --git a/cmd/system-probe/modules/network_tracer.go b/cmd/system-probe/modules/network_tracer.go index 4852575b36a75..44f2af55d9c78 100644 --- a/cmd/system-probe/modules/network_tracer.go +++ b/cmd/system-probe/modules/network_tracer.go @@ -12,6 +12,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "net/http" "os" "runtime" @@ -108,6 +109,16 @@ func (nt *networkTracer) Register(httpMux *module.Router) error { logRequests(id, count, len(cs.Conns), start) })) + httpMux.HandleFunc("/network_id", utils.WithConcurrencyLimit(utils.DefaultMaxConcurrentRequests, func(w http.ResponseWriter, req *http.Request) { + id, err := nt.tracer.GetNetworkID(req.Context()) + if err != nil { + log.Errorf("unable to retrieve network_id: %s", err) + w.WriteHeader(500) + return + } + io.WriteString(w, id) + })) + httpMux.HandleFunc("/register", utils.WithConcurrencyLimit(utils.DefaultMaxConcurrentRequests, func(w http.ResponseWriter, req *http.Request) { id := getClientID(req) err := nt.tracer.RegisterClient(id) diff --git 
a/cmd/system-probe/modules/traceroute.go b/cmd/system-probe/modules/traceroute.go index 320106472c087..6e0667e120784 100644 --- a/cmd/system-probe/modules/traceroute.go +++ b/cmd/system-probe/modules/traceroute.go @@ -98,8 +98,8 @@ func (t *traceroute) RegisterGRPC(_ grpc.ServiceRegistrar) error { func (t *traceroute) Close() {} func logTracerouteRequests(cfg tracerouteutil.Config, client string, runCount uint64, start time.Time) { - args := []interface{}{cfg.DestHostname, client, cfg.DestPort, cfg.MaxTTL, cfg.Timeout, runCount, time.Since(start)} - msg := "Got request on /traceroute/%s?client_id=%s&port=%d&maxTTL=%d&timeout=%d (count: %d): retrieved traceroute in %s" + args := []interface{}{cfg.DestHostname, client, cfg.DestPort, cfg.MaxTTL, cfg.Timeout, cfg.Protocol, runCount, time.Since(start)} + msg := "Got request on /traceroute/%s?client_id=%s&port=%d&maxTTL=%d&timeout=%d&protocol=%s (count: %d): retrieved traceroute in %s" switch { case runCount <= 5, runCount%20 == 0: log.Infof(msg, args...) diff --git a/cmd/system-probe/subcommands/config/command.go b/cmd/system-probe/subcommands/config/command.go index 0d9d81dcb47ce..7010d214143d5 100644 --- a/cmd/system-probe/subcommands/config/command.go +++ b/cmd/system-probe/subcommands/config/command.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig" "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl" "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config/fetcher" + fetcher "github.com/DataDog/datadog-agent/pkg/config/fetcher/sysprobe" "github.com/DataDog/datadog-agent/pkg/config/settings" settingshttp "github.com/DataDog/datadog-agent/pkg/config/settings/http" "github.com/DataDog/datadog-agent/pkg/util/fxutil" diff --git a/cmd/system-probe/subcommands/run/command.go b/cmd/system-probe/subcommands/run/command.go index c61f0d1316d99..e5461f8c483db 100644 --- a/cmd/system-probe/subcommands/run/command.go +++ b/cmd/system-probe/subcommands/run/command.go @@ -50,7 +50,6 @@ import ( compstatsd "github.com/DataDog/datadog-agent/comp/dogstatsd/statsd" "github.com/DataDog/datadog-agent/comp/remote-config/rcclient" "github.com/DataDog/datadog-agent/comp/remote-config/rcclient/rcclientimpl" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/config/model" commonsettings "github.com/DataDog/datadog-agent/pkg/config/settings" @@ -384,7 +383,7 @@ func stopSystemProbe() { } // setupInternalProfiling is a common helper to configure runtime settings for internal profiling. 
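+// Profiling values are read from keys under configPrefix, for example configPrefix+"internal_profiling.block_profile_rate".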
-func setupInternalProfiling(settings settings.Component, cfg ddconfig.Reader, configPrefix string, log log.Component) { +func setupInternalProfiling(settings settings.Component, cfg model.Reader, configPrefix string, log log.Component) { if v := cfg.GetInt(configPrefix + "internal_profiling.block_profile_rate"); v > 0 { if err := settings.SetRuntimeSetting("runtime_block_profile_rate", v, model.SourceAgentRuntime); err != nil { log.Errorf("Error setting block profile rate: %v", err) diff --git a/cmd/system-probe/subcommands/runtime/command.go b/cmd/system-probe/subcommands/runtime/command.go index 5016d28166e48..2bce948b2764c 100644 --- a/cmd/system-probe/subcommands/runtime/command.go +++ b/cmd/system-probe/subcommands/runtime/command.go @@ -30,7 +30,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" secagent "github.com/DataDog/datadog-agent/pkg/security/agent" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" pconfig "github.com/DataDog/datadog-agent/pkg/security/probe/config" @@ -99,7 +99,7 @@ func evalCommands(globalParams *command.GlobalParams) []*cobra.Command { }, } - evalCmd.Flags().StringVar(&evalArgs.dir, "policies-dir", pkgconfig.DefaultRuntimePoliciesDir, "Path to policies directory") + evalCmd.Flags().StringVar(&evalArgs.dir, "policies-dir", pkgconfigsetup.DefaultRuntimePoliciesDir, "Path to policies directory") evalCmd.Flags().StringVar(&evalArgs.ruleID, "rule-id", "", "Rule ID to evaluate") _ = evalCmd.MarkFlagRequired("rule-id") evalCmd.Flags().StringVar(&evalArgs.eventFile, "event-file", "", "File of the event data") @@ -132,7 +132,7 @@ func commonCheckPoliciesCommands(globalParams *command.GlobalParams) []*cobra.Co }, } - commonCheckPoliciesCmd.Flags().StringVar(&cliParams.dir, "policies-dir", pkgconfig.DefaultRuntimePoliciesDir, "Path to policies directory") + commonCheckPoliciesCmd.Flags().StringVar(&cliParams.dir, "policies-dir", pkgconfigsetup.DefaultRuntimePoliciesDir, "Path to policies directory") commonCheckPoliciesCmd.Flags().BoolVar(&cliParams.evaluateAllPolicySources, "loaded-policies", false, "Evaluate loaded policies") if runtime.GOOS == "linux" { commonCheckPoliciesCmd.Flags().BoolVar(&cliParams.windowsModel, "windows-model", false, "Evaluate policies using the Windows model") diff --git a/cmd/trace-agent/test/testsuite/traces_test.go b/cmd/trace-agent/test/testsuite/traces_test.go index 1404d6b558b12..93790de58b6ef 100644 --- a/cmd/trace-agent/test/testsuite/traces_test.go +++ b/cmd/trace-agent/test/testsuite/traces_test.go @@ -184,6 +184,34 @@ func TestTraces(t *testing.T) { }) }) + t.Run("normalize, obfuscate, sqllexer", func(t *testing.T) { + if err := r.RunAgent([]byte("apm_config:\r\n features:[\"sqllexer\"]\r\n")); err != nil { + t.Fatal(err) + } + defer r.KillAgent() + + p := testutil.GeneratePayload(1, &testutil.TraceConfig{ + MinSpans: 4, + Keep: true, + }, nil) + for _, span := range p[0] { + span.Service = strings.Repeat("a", 200) // Too long + span.Name = strings.Repeat("b", 200) // Too long + } + p[0][0].Type = "sql" + p[0][0].Resource = "SELECT secret FROM users WHERE id = 123" + if err := r.Post(p); err != nil { + t.Fatal(err) + } + waitForTrace(t, &r, func(v *pb.AgentPayload) { + assert.Equal(t, "SELECT secret FROM users WHERE id = ?", 
v.TracerPayloads[0].Chunks[0].Spans[0].Resource) + for _, s := range v.TracerPayloads[0].Chunks[0].Spans { + assert.Len(t, s.Service, 100) + assert.Len(t, s.Name, 100) + } + }) + }) + t.Run("probabilistic", func(t *testing.T) { if err := r.RunAgent([]byte("apm_config:\r\n probabilistic_sampler:\r\n enabled: true\r\n sampling_percentage: 100\r\n")); err != nil { t.Fatal(err) diff --git a/comp/README.md b/comp/README.md index 357b628c7a3c8..5c6b4887087f5 100644 --- a/comp/README.md +++ b/comp/README.md @@ -375,10 +375,6 @@ Package collector defines the OpenTelemetry Collector component. Package collectorcontrib defines the OTel collector-contrib component -### [comp/otelcol/configstore](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/otelcol/configstore) - -Package configstore defines the otel agent configstore component. - ### [comp/otelcol/converter](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/otelcol/converter) Package converter defines the otel agent converter component. diff --git a/comp/agent/bundle_test.go b/comp/agent/bundle_test.go index 7f60820804110..1f52881bc6596 100644 --- a/comp/agent/bundle_test.go +++ b/comp/agent/bundle_test.go @@ -8,8 +8,6 @@ package agent import ( "testing" - "go.uber.org/fx" - "github.com/DataDog/datadog-agent/comp/agent/jmxlogger/jmxloggerimpl" "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" "github.com/DataDog/datadog-agent/comp/core" @@ -30,8 +28,7 @@ func TestBundleDependencies(t *testing.T) { defaultforwarder.MockModule(), orchestratorimpl.MockModule(), eventplatformimpl.MockModule(), - demultiplexerimpl.Module(), - fx.Supply(demultiplexerimpl.NewDefaultParams()), + demultiplexerimpl.Module(demultiplexerimpl.NewDefaultParams()), workloadmetafxmock.MockModule(workloadmeta.NewParams()), ) } diff --git a/comp/agent/jmxlogger/jmxloggerimpl/jmxlogger.go b/comp/agent/jmxlogger/jmxloggerimpl/jmxlogger.go index 1fe9aa55b1dfa..9b0824b8fad41 100644 --- a/comp/agent/jmxlogger/jmxloggerimpl/jmxlogger.go +++ b/comp/agent/jmxlogger/jmxloggerimpl/jmxlogger.go @@ -14,9 +14,10 @@ import ( "github.com/DataDog/datadog-agent/cmd/agent/common/path" "github.com/DataDog/datadog-agent/comp/agent/jmxlogger" "github.com/DataDog/datadog-agent/comp/core/config" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/log" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) // Module defines the fx options for this component. 
@@ -41,7 +42,7 @@ func newJMXLogger(deps dependencies) (jmxlogger.Component, error) { return logger{}, nil } if deps.Params.fromCLI { - err := pkgconfig.SetupJMXLogger(deps.Params.logFile, "", false, true, false) + err := pkglogsetup.SetupJMXLogger(deps.Params.logFile, "", false, true, false, pkgconfigsetup.Datadog()) if err != nil { err = fmt.Errorf("Unable to set up JMX logger: %v", err) } @@ -49,7 +50,7 @@ func newJMXLogger(deps dependencies) (jmxlogger.Component, error) { } // Setup logger - syslogURI := pkgconfig.GetSyslogURI() + syslogURI := pkglogsetup.GetSyslogURI(pkgconfigsetup.Datadog()) jmxLogFile := config.GetString("jmx_log_file") if jmxLogFile == "" { jmxLogFile = path.DefaultJmxLogFile @@ -61,12 +62,13 @@ func newJMXLogger(deps dependencies) (jmxlogger.Component, error) { } // Setup JMX logger - jmxLoggerSetupErr := pkgconfig.SetupJMXLogger( + jmxLoggerSetupErr := pkglogsetup.SetupJMXLogger( jmxLogFile, syslogURI, config.GetBool("syslog_rfc"), config.GetBool("log_to_console"), config.GetBool("log_format_json"), + pkgconfigsetup.Datadog(), ) if jmxLoggerSetupErr != nil { diff --git a/comp/aggregator/bundle.go b/comp/aggregator/bundle.go index a818808a85fce..3539e731085cc 100644 --- a/comp/aggregator/bundle.go +++ b/comp/aggregator/bundle.go @@ -14,7 +14,7 @@ import ( // team: agent-metrics-logs // Bundle defines the fx options for this bundle. -func Bundle() fxutil.BundleOptions { +func Bundle(params demultiplexerimpl.Params) fxutil.BundleOptions { return fxutil.Bundle( - demultiplexerimpl.Module()) + demultiplexerimpl.Module(params)) } diff --git a/comp/aggregator/bundle_test.go b/comp/aggregator/bundle_test.go index ff208e7fb570f..d1a4cb2d4ecbd 100644 --- a/comp/aggregator/bundle_test.go +++ b/comp/aggregator/bundle_test.go @@ -8,8 +8,6 @@ package aggregator import ( "testing" - "go.uber.org/fx" - "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" @@ -20,12 +18,11 @@ import ( ) func TestBundleDependencies(t *testing.T) { - fxutil.TestBundle(t, Bundle(), + fxutil.TestBundle(t, Bundle(demultiplexerimpl.Params{}), core.MockBundle(), compressionimpl.MockModule(), defaultforwarder.MockModule(), orchestratorForwarderImpl.MockModule(), - fx.Supply(demultiplexerimpl.Params{}), eventplatformimpl.MockModule(), ) } diff --git a/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer.go b/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer.go index d3b52d674ac8a..d51c825f847d1 100644 --- a/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer.go +++ b/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer.go @@ -13,6 +13,7 @@ import ( demultiplexerComp "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" "github.com/DataDog/datadog-agent/comp/aggregator/diagnosesendermanager" + "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/status" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" @@ -26,14 +27,16 @@ import ( ) // Module defines the fx options for this component. 
-func Module() fxutil.Module { +func Module(params Params) fxutil.Module { return fxutil.Component( - fx.Provide(newDemultiplexer)) + fx.Provide(newDemultiplexer), + fx.Supply(params)) } type dependencies struct { fx.In Lc fx.Lifecycle + Config config.Component Log log.Component SharedForwarder defaultforwarder.Component OrchestratorForwarder orchestratorforwarder.Component @@ -67,19 +70,19 @@ type provides struct { func newDemultiplexer(deps dependencies) (provides, error) { hostnameDetected, err := hostname.Get(context.TODO()) if err != nil { - if deps.Params.ContinueOnMissingHostname { + if deps.Params.continueOnMissingHostname { deps.Log.Warnf("Error getting hostname: %s", err) hostnameDetected = "" } else { return provides{}, deps.Log.Errorf("Error while getting hostname, exiting: %v", err) } } - + options := createAgentDemultiplexerOptions(deps.Config, deps.Params) agentDemultiplexer := aggregator.InitAndStartAgentDemultiplexer( deps.Log, deps.SharedForwarder, deps.OrchestratorForwarder, - deps.Params.AgentDemultiplexerOptions, + options, deps.EventPlatformForwarder, deps.Compressor, hostnameDetected) @@ -102,6 +105,19 @@ func newDemultiplexer(deps dependencies) (provides, error) { }, nil } +func createAgentDemultiplexerOptions(config config.Component, params Params) aggregator.AgentDemultiplexerOptions { + options := aggregator.DefaultAgentDemultiplexerOptions() + if params.useDogstatsdNoAggregationPipelineConfig { + options.EnableNoAggregationPipeline = config.GetBool("dogstatsd_no_aggregation_pipeline") + } + + // Override FlushInterval only if flushInterval is set by the user + if v, ok := params.flushInterval.Get(); ok { + options.FlushInterval = v + } + return options +} + // LazyGetSenderManager gets an instance of SenderManager lazily. 
func (demux demultiplexer) LazyGetSenderManager() (sender.SenderManager, error) { return demux, nil diff --git a/comp/aggregator/demultiplexer/demultiplexerimpl/params.go b/comp/aggregator/demultiplexer/demultiplexerimpl/params.go index 1653ca820b7f5..b176d9b30a0ef 100644 --- a/comp/aggregator/demultiplexer/demultiplexerimpl/params.go +++ b/comp/aggregator/demultiplexer/demultiplexerimpl/params.go @@ -5,18 +5,51 @@ package demultiplexerimpl -import "github.com/DataDog/datadog-agent/pkg/aggregator" +import ( + "time" + + "github.com/DataDog/datadog-agent/pkg/util/optional" +) // Params contains the parameters for the demultiplexer type Params struct { - aggregator.AgentDemultiplexerOptions - ContinueOnMissingHostname bool + continueOnMissingHostname bool + + // This is an optional field to override the default flush interval only if it is set + flushInterval optional.Option[time.Duration] + + useDogstatsdNoAggregationPipelineConfig bool } +// Option is a function that sets a parameter in the Params struct +type Option func(*Params) + // NewDefaultParams returns the default parameters for the demultiplexer -func NewDefaultParams() Params { - return Params{ - AgentDemultiplexerOptions: aggregator.DefaultAgentDemultiplexerOptions(), - ContinueOnMissingHostname: false, +func NewDefaultParams(options ...Option) Params { + p := Params{} + for _, o := range options { + o(&p) + } + return p +} + +// WithContinueOnMissingHostname sets the continueOnMissingHostname field to true +func WithContinueOnMissingHostname() Option { + return func(p *Params) { + p.continueOnMissingHostname = true + } +} + +// WithFlushInterval sets the flushInterval field to the provided duration +func WithFlushInterval(duration time.Duration) Option { + return func(p *Params) { + p.flushInterval = optional.NewOption(duration) + } +} + +// WithDogstatsdNoAggregationPipelineConfig uses the config dogstatsd_no_aggregation_pipeline +func WithDogstatsdNoAggregationPipelineConfig() Option { + return func(p *Params) { + p.useDogstatsdNoAggregationPipelineConfig = true } } diff --git a/comp/aggregator/demultiplexer/demultiplexerimpl/status_test.go b/comp/aggregator/demultiplexer/demultiplexerimpl/status_test.go index 41fea8657e3d9..8ec83cb0cddf0 100644 --- a/comp/aggregator/demultiplexer/demultiplexerimpl/status_test.go +++ b/comp/aggregator/demultiplexer/demultiplexerimpl/status_test.go @@ -61,7 +61,7 @@ func TestStatusOutPut(t *testing.T) { eventplatformimpl.MockModule(), fx.Supply( Params{ - ContinueOnMissingHostname: true, + continueOnMissingHostname: true, }, ), )) diff --git a/comp/aggregator/demultiplexer/demultiplexerimpl/test_agent_demultiplexer.go b/comp/aggregator/demultiplexer/demultiplexerimpl/test_agent_demultiplexer.go index a4a4f21b576bb..cec4437ab09f8 100644 --- a/comp/aggregator/demultiplexer/demultiplexerimpl/test_agent_demultiplexer.go +++ b/comp/aggregator/demultiplexer/demultiplexerimpl/test_agent_demultiplexer.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl" "github.com/DataDog/datadog-agent/comp/serializer/compression" "github.com/DataDog/datadog-agent/pkg/aggregator" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" @@ -180,8 +180,8 @@ func initTestAgentDemultiplexerWithFlushInterval(log log.Component, hostname hos 
opts.DontStartForwarders = true opts.EnableNoAggregationPipeline = true - sharedForwarderOptions := defaultforwarder.NewOptions(config.Datadog(), log, nil) - sharedForwarder := defaultforwarder.NewDefaultForwarder(config.Datadog(), log, sharedForwarderOptions) + sharedForwarderOptions := defaultforwarder.NewOptions(pkgconfigsetup.Datadog(), log, nil) + sharedForwarder := defaultforwarder.NewDefaultForwarder(pkgconfigsetup.Datadog(), log, sharedForwarderOptions) orchestratorForwarder := optional.NewOption[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) eventPlatformForwarder := optional.NewOptionPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(hostname)) demux := aggregator.InitAndStartAgentDemultiplexer(log, sharedForwarder, &orchestratorForwarder, opts, eventPlatformForwarder, compressor, "hostname") diff --git a/comp/api/api/apiimpl/internal/config/endpoint.go b/comp/api/api/apiimpl/internal/config/endpoint.go index 40e7751a0b2b2..4b812d1e522b3 100644 --- a/comp/api/api/apiimpl/internal/config/endpoint.go +++ b/comp/api/api/apiimpl/internal/config/endpoint.go @@ -17,7 +17,8 @@ import ( gorilla "github.com/gorilla/mux" api "github.com/DataDog/datadog-agent/comp/api/api/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" util "github.com/DataDog/datadog-agent/pkg/util/common" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -25,7 +26,7 @@ import ( const prefixPathSuffix string = "." type configEndpoint struct { - cfg config.Reader + cfg model.Reader authorizedConfigPaths api.AuthorizedSet // runtime metrics about the config endpoint usage @@ -109,18 +110,18 @@ func (c *configEndpoint) getAllConfigValuesHandler(w http.ResponseWriter, r *htt // GetConfigEndpointMuxCore builds and returns the mux for the config endpoint with default values // for the core agent func GetConfigEndpointMuxCore() *gorilla.Router { - return GetConfigEndpointMux(config.Datadog(), api.AuthorizedConfigPathsCore, "core") + return GetConfigEndpointMux(pkgconfigsetup.Datadog(), api.AuthorizedConfigPathsCore, "core") } // GetConfigEndpointMux builds and returns the mux for the config endpoint, with the given config, // authorized paths, and expvar namespace -func GetConfigEndpointMux(cfg config.Reader, authorizedConfigPaths api.AuthorizedSet, expvarNamespace string) *gorilla.Router { +func GetConfigEndpointMux(cfg model.Reader, authorizedConfigPaths api.AuthorizedSet, expvarNamespace string) *gorilla.Router { mux, _ := getConfigEndpoint(cfg, authorizedConfigPaths, expvarNamespace) return mux } // getConfigEndpoint builds and returns the mux and the endpoint state. 
-func getConfigEndpoint(cfg config.Reader, authorizedConfigPaths api.AuthorizedSet, expvarNamespace string) (*gorilla.Router, *configEndpoint) { +func getConfigEndpoint(cfg model.Reader, authorizedConfigPaths api.AuthorizedSet, expvarNamespace string) (*gorilla.Router, *configEndpoint) { configEndpoint := &configEndpoint{ cfg: cfg, authorizedConfigPaths: authorizedConfigPaths, @@ -142,7 +143,7 @@ func getConfigEndpoint(cfg config.Reader, authorizedConfigPaths api.AuthorizedSe return configEndpointMux, configEndpoint } -func encodeInterfaceSliceToStringMap(c config.Reader, key string) ([]map[string]string, error) { +func encodeInterfaceSliceToStringMap(c model.Reader, key string) ([]map[string]string, error) { value := c.Get(key) if value == nil { return nil, nil diff --git a/comp/api/api/apiimpl/listener.go b/comp/api/api/apiimpl/listener.go index be0857a438926..7fdbafcc9a68b 100644 --- a/comp/api/api/apiimpl/listener.go +++ b/comp/api/api/apiimpl/listener.go @@ -10,16 +10,16 @@ import ( "net" "strconv" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // getIPCAddressPort returns a listening connection func getIPCAddressPort() (string, error) { - address, err := config.GetIPCAddress() + address, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return "", err } - return fmt.Sprintf("%v:%v", address, config.Datadog().GetInt("cmd_port")), nil + return fmt.Sprintf("%v:%v", address, pkgconfigsetup.Datadog().GetInt("cmd_port")), nil } // getListener returns a listening connection @@ -29,12 +29,12 @@ func getListener(address string) (net.Listener, error) { // returns whether the IPC server is enabled, and if so its host and host:port func getIPCServerAddressPort() (string, string, bool) { - ipcServerPort := config.Datadog().GetInt("agent_ipc.port") + ipcServerPort := pkgconfigsetup.Datadog().GetInt("agent_ipc.port") if ipcServerPort == 0 { return "", "", false } - ipcServerHost := config.Datadog().GetString("agent_ipc.host") + ipcServerHost := pkgconfigsetup.Datadog().GetString("agent_ipc.host") ipcServerHostPort := net.JoinHostPort(ipcServerHost, strconv.Itoa(ipcServerPort)) return ipcServerHost, ipcServerHostPort, true diff --git a/comp/api/api/apiimpl/server.go b/comp/api/api/apiimpl/server.go index 1c4d0bffb7d8c..d1a3105ee5d3d 100644 --- a/comp/api/api/apiimpl/server.go +++ b/comp/api/api/apiimpl/server.go @@ -16,13 +16,13 @@ import ( "github.com/DataDog/datadog-agent/comp/api/api/apiimpl/observability" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/log" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) func startServer(listener net.Listener, srv *http.Server, name string) { // Use a stack depth of 4 on top of the default one to get a relevant filename in the stdlib - logWriter, _ := config.NewLogWriter(5, seelog.ErrorLvl) + logWriter, _ := pkglogsetup.NewLogWriter(5, seelog.ErrorLvl) srv.ErrorLog = stdLog.New(logWriter, fmt.Sprintf("Error from the Agent HTTP server '%s': ", name), 0) // log errors to seelog diff --git a/comp/api/api/apiimpl/server_cmd.go b/comp/api/api/apiimpl/server_cmd.go index 117b4d18c6be3..56215842c06d0 100644 --- a/comp/api/api/apiimpl/server_cmd.go +++ b/comp/api/api/apiimpl/server_cmd.go @@ -24,7 +24,7 @@ import ( "github.com/DataDog/datadog-agent/comp/api/api/apiimpl/observability" taggerserver "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/server" workloadmetaServer 
"github.com/DataDog/datadog-agent/comp/core/workloadmeta/server" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" ) @@ -123,7 +123,7 @@ func (server *apiServer) startCMDServer( cmdAddr, tlsConfig, s, - grpcutil.TimeoutHandlerFunc(cmdMuxHandler, time.Duration(config.Datadog().GetInt64("server_timeout"))*time.Second), + grpcutil.TimeoutHandlerFunc(cmdMuxHandler, time.Duration(pkgconfigsetup.Datadog().GetInt64("server_timeout"))*time.Second), ) startServer(server.cmdListener, srv, cmdServerName) diff --git a/comp/api/api/apiimpl/server_ipc.go b/comp/api/api/apiimpl/server_ipc.go index bfc50218c99c9..10e17509f75e3 100644 --- a/comp/api/api/apiimpl/server_ipc.go +++ b/comp/api/api/apiimpl/server_ipc.go @@ -12,7 +12,7 @@ import ( configendpoint "github.com/DataDog/datadog-agent/comp/api/api/apiimpl/internal/config" "github.com/DataDog/datadog-agent/comp/api/api/apiimpl/observability" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) const ipcServerName string = "IPC API Server" @@ -38,7 +38,7 @@ func (server *apiServer) startIPCServer(ipcServerAddr string, tlsConfig *tls.Con ipcServer := &http.Server{ Addr: ipcServerAddr, - Handler: http.TimeoutHandler(ipcMuxHandler, time.Duration(config.Datadog().GetInt64("server_timeout"))*time.Second, "timeout"), + Handler: http.TimeoutHandler(ipcMuxHandler, time.Duration(pkgconfigsetup.Datadog().GetInt64("server_timeout"))*time.Second, "timeout"), TLSConfig: tlsConfig, } diff --git a/comp/api/authtoken/go.mod b/comp/api/authtoken/go.mod new file mode 100644 index 0000000000000..b4fadeccdc96d --- /dev/null +++ b/comp/api/authtoken/go.mod @@ -0,0 +1,115 @@ +module github.com/DataDog/datadog-agent/comp/api/authtoken + +go 1.22.0 + +replace ( + github.com/DataDog/datadog-agent/cmd/agent/common/path => ../../../cmd/agent/common/path + github.com/DataDog/datadog-agent/comp/api/api/def => ../../../comp/api/api/def + github.com/DataDog/datadog-agent/comp/core/config => ../../../comp/core/config + github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../../comp/core/flare/builder + github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../comp/core/flare/types + github.com/DataDog/datadog-agent/comp/core/log/def => ../../../comp/core/log/def + github.com/DataDog/datadog-agent/comp/core/log/mock => ../../../comp/core/log/mock + github.com/DataDog/datadog-agent/comp/core/secrets => ../../../comp/core/secrets + github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../comp/core/telemetry + github.com/DataDog/datadog-agent/comp/def => ../../../comp/def + github.com/DataDog/datadog-agent/pkg/api => ../../../pkg/api + github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../pkg/collector/check/defaults + github.com/DataDog/datadog-agent/pkg/config/env => ../../../pkg/config/env + github.com/DataDog/datadog-agent/pkg/config/mock => ../../../pkg/config/mock + github.com/DataDog/datadog-agent/pkg/config/model => ../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel + github.com/DataDog/datadog-agent/pkg/config/setup => ../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig + github.com/DataDog/datadog-agent/pkg/config/utils => ../../../pkg/config/utils + 
github.com/DataDog/datadog-agent/pkg/util/executable => ../../../pkg/util/executable + github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../pkg/util/filesystem + github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../pkg/util/fxutil + github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../pkg/util/hostname/validate/ + github.com/DataDog/datadog-agent/pkg/util/log => ../../../pkg/util/log + github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../pkg/util/log/setup + github.com/DataDog/datadog-agent/pkg/util/optional => ../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../pkg/util/pointer + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../pkg/util/scrubber + github.com/DataDog/datadog-agent/pkg/util/system => ../../../pkg/util/system + github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../pkg/util/system/socket/ + github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../pkg/util/testutil + github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../pkg/util/winutil + github.com/DataDog/datadog-agent/pkg/version => ../../../pkg/version +) + +require ( + github.com/DataDog/datadog-agent/comp/core/config v0.56.0 + github.com/DataDog/datadog-agent/comp/core/log/def v0.58.0-devel + github.com/DataDog/datadog-agent/comp/core/log/mock v0.58.0-devel + github.com/DataDog/datadog-agent/pkg/api v0.56.0 + github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0 + github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0 + github.com/stretchr/testify v1.9.0 + go.uber.org/fx v1.22.2 +) + +require ( + github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.0 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0 // indirect + github.com/DataDog/datadog-agent/comp/def v0.56.0 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.56.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.56.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.56.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log/setup v0.58.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.56.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0 // indirect + github.com/DataDog/datadog-agent/pkg/version v0.56.0 // indirect + github.com/DataDog/viper v1.13.5 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // 
indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.1 // indirect + github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/shirou/gopsutil/v3 v3.23.12 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/cobra v1.7.0 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/dig v1.18.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.17.0 // indirect + golang.org/x/tools v0.25.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/comp/api/authtoken/go.sum b/comp/api/authtoken/go.sum new file mode 100644 index 0000000000000..b8178bc54e8ad --- /dev/null +++ b/comp/api/authtoken/go.sum @@ -0,0 +1,357 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= +github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod 
h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod 
h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod 
h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs 
v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= +github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= +go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.11.0 
h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= +go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod 
h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/comp/autoscaling/datadogclient/impl/client.go b/comp/autoscaling/datadogclient/impl/client.go index 406b793b916ff..ee204f1f921f3 100644 --- a/comp/autoscaling/datadogclient/impl/client.go +++ b/comp/autoscaling/datadogclient/impl/client.go @@ -14,6 +14,7 @@ import ( configComponent "github.com/DataDog/datadog-agent/comp/core/config" logComp "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/status" + "github.com/DataDog/datadog-agent/pkg/config/structure" "gopkg.in/zorkian/go-datadog-api.v2" ) @@ -120,7 +121,7 @@ func (d *datadogClientWrapper) refreshClient() { func createDatadogClient(cfg configComponent.Component, logger logComp.Component) (datadogclient.Component, error) { if cfg.IsSet(metricsRedundantEndpointConfig) { var endpoints []endpoint - if err := cfg.UnmarshalKey(metricsRedundantEndpointConfig, &endpoints); err != nil { + if err := structure.UnmarshalKey(cfg, metricsRedundantEndpointConfig, &endpoints); err != nil { return nil, fmt.Errorf("could not parse %s: %v", metricsRedundantEndpointConfig, err) } diff --git a/comp/collector/collector/collectorimpl/collector.go b/comp/collector/collector/collectorimpl/collector.go index e265871190fef..f90b0c61cec88 100644 --- a/comp/collector/collector/collectorimpl/collector.go +++ b/comp/collector/collector/collectorimpl/collector.go @@ -8,6 +8,7 @@ package collectorimpl import ( "context" + "encoding/json" "fmt" "sync" "time" @@ -20,6 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/collector/collector" "github.com/DataDog/datadog-agent/comp/collector/collector/collectorimpl/internal/middleware" "github.com/DataDog/datadog-agent/comp/core/config" + flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/status" metadata "github.com/DataDog/datadog-agent/comp/metadata/runner/runnerimpl" @@ -30,6 +32,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/runner" "github.com/DataDog/datadog-agent/pkg/collector/runner/expvars" "github.com/DataDog/datadog-agent/pkg/collector/scheduler" + "github.com/DataDog/datadog-agent/pkg/sbom/collectors/host" + "github.com/DataDog/datadog-agent/pkg/sbom/scanner" "github.com/DataDog/datadog-agent/pkg/serializer" collectorStatus "github.com/DataDog/datadog-agent/pkg/status/collector" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -81,6 +85,7 @@ type provides struct { StatusProvider status.InformationProvider MetadataProvider metadata.Provider APIGetPyStatus 
api.AgentEndpointProvider + FlareProvider flaretypes.Provider } // Module defines the fx options for this component. @@ -106,6 +111,7 @@ func newProvides(deps dependencies) provides { StatusProvider: status.NewInformationProvider(collectorStatus.Provider{}), MetadataProvider: agentCheckMetadata, APIGetPyStatus: api.NewAgentEndpointProvider(getPythonStatus, "/py/status", "GET"), + FlareProvider: flaretypes.NewProvider(c.fillFlare), } } @@ -132,6 +138,35 @@ func newCollector(deps dependencies) *collectorImpl { return c } +// fillFlare collects all the information related to integrations that need to be added to each flare +func (c *collectorImpl) fillFlare(fb flaretypes.FlareBuilder) error { + scanner := scanner.GetGlobalScanner() + if scanner == nil { + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + scanRequest := host.NewHostScanRequest() + scanResult := scanner.PerformScan(ctx, scanRequest, scanner.GetCollector(scanRequest.Collector())) + if scanResult.Error != nil { + return scanResult.Error + } + + cycloneDX, err := scanResult.Report.ToCycloneDX() + if err != nil { + return err + } + + jsonContent, err := json.MarshalIndent(cycloneDX, "", " ") + if err != nil { + return err + } + + return fb.AddFile("host-sbom.json", jsonContent) +} + // AddEventReceiver adds a callback to the collector to be called each time a check is added or removed. func (c *collectorImpl) AddEventReceiver(cb collector.EventReceiver) { c.m.Lock() diff --git a/comp/core/agenttelemetry/impl/config.go b/comp/core/agenttelemetry/impl/config.go index a33a440162119..3109a400b573c 100644 --- a/comp/core/agenttelemetry/impl/config.go +++ b/comp/core/agenttelemetry/impl/config.go @@ -237,7 +237,7 @@ var defaultProfiles = ` - check_name:network - check_name:io - check_name:file_handle - metrics: + metrics: - name: checks.runs aggregate_tags: - check_name @@ -257,6 +257,10 @@ var defaultProfiles = ` - name: logs.destination_http_resp aggregate_tags: - status_code + - name: oracle.activity_samples_count + - name: oracle.activity_latency + - name: oracle.statement_metrics + - name: oracle.statement_plan_errors - name: transactions.input_count - name: transactions.requeued - name: transactions.retries diff --git a/comp/core/autodiscovery/autodiscoveryimpl/autoconfig.go b/comp/core/autodiscovery/autodiscoveryimpl/autoconfig.go index 700d3247f7e5f..48047e5b06d0f 100644 --- a/comp/core/autodiscovery/autodiscoveryimpl/autoconfig.go +++ b/comp/core/autodiscovery/autodiscoveryimpl/autoconfig.go @@ -37,7 +37,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/collector/check" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/flare" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -479,7 +479,7 @@ func (ac *AutoConfig) processNewConfig(config integration.Config) integration.Co // AddListeners tries to initialise the listeners listed in the given configs. A first // try is done synchronously. If a listener fails with a ErrWillRetry, the initialization // will be re-triggered later until success or ErrPermaFail. 
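For reference, a minimal sketch (not part of this patch) of the flare-provider pattern the collector hunk above adopts: a component exposes a flaretypes.Provider built from a callback that receives a FlareBuilder and adds one file to the flare archive. Only the flaretypes identifiers are taken from the hunk; the package, component, and file names here are hypothetical.

package exampleimpl

import (
	flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types"
)

type exampleComponent struct{}

// fillFlare writes a single payload into the flare archive, mirroring
// collectorImpl.fillFlare above (which adds host-sbom.json).
func (e *exampleComponent) fillFlare(fb flaretypes.FlareBuilder) error {
	return fb.AddFile("example.txt", []byte("collected at flare time"))
}

type provides struct {
	// Other provided values (status, metadata, ...) are omitted in this sketch.
	FlareProvider flaretypes.Provider
}

func newProvides(e *exampleComponent) provides {
	return provides{
		// Same registration shape as flaretypes.NewProvider(c.fillFlare) above.
		FlareProvider: flaretypes.NewProvider(e.fillFlare),
	}
}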
-func (ac *AutoConfig) AddListeners(listenerConfigs []config.Listeners) { +func (ac *AutoConfig) AddListeners(listenerConfigs []pkgconfigsetup.Listeners) { ac.addListenerCandidates(listenerConfigs) remaining := ac.initListenerCandidates() if !remaining { @@ -495,7 +495,7 @@ func (ac *AutoConfig) AddListeners(listenerConfigs []config.Listeners) { } } -func (ac *AutoConfig) addListenerCandidates(listenerConfigs []config.Listeners) { +func (ac *AutoConfig) addListenerCandidates(listenerConfigs []pkgconfigsetup.Listeners) { ac.m.Lock() defer ac.m.Unlock() diff --git a/comp/core/autodiscovery/autodiscoveryimpl/autoconfig_test.go b/comp/core/autodiscovery/autodiscoveryimpl/autoconfig_test.go index 6b4a269ccea64..61c5abe56402d 100644 --- a/comp/core/autodiscovery/autodiscoveryimpl/autoconfig_test.go +++ b/comp/core/autodiscovery/autodiscoveryimpl/autoconfig_test.go @@ -35,8 +35,9 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" "github.com/DataDog/datadog-agent/pkg/util/optional" "github.com/DataDog/datadog-agent/pkg/util/retry" ) @@ -86,7 +87,7 @@ func (l *MockListener) fakeFactory(listeners.Config, *acTelemetry.Store) (listen return l, nil } -var mockListenenerConfig = config.Listeners{ +var mockListenenerConfig = pkgconfigsetup.Listeners{ Name: "mock", } @@ -173,14 +174,15 @@ type AutoConfigTestSuite struct { // SetupSuite saves the original listener registry func (suite *AutoConfigTestSuite) SetupSuite() { - config.SetupLogger( - config.LoggerName("test"), + pkglogsetup.SetupLogger( + pkglogsetup.LoggerName("test"), "debug", "", "", false, true, false, + pkgconfigsetup.Datadog(), ) } @@ -218,7 +220,7 @@ func (suite *AutoConfigTestSuite) TestAddListener() { ml := &MockListener{} listeners.Register("mock", ml.fakeFactory, ac.serviceListenerFactories) - ac.AddListeners([]config.Listeners{mockListenenerConfig}) + ac.AddListeners([]pkgconfigsetup.Listeners{mockListenenerConfig}) ac.m.Lock() require.Len(suite.T(), ac.listeners, 1) @@ -255,7 +257,7 @@ func (suite *AutoConfigTestSuite) TestStop() { ml := &MockListener{} listeners.Register("mock", ml.fakeFactory, ac.serviceListenerFactories) - ac.AddListeners([]config.Listeners{mockListenenerConfig}) + ac.AddListeners([]pkgconfigsetup.Listeners{mockListenenerConfig}) ac.Stop() @@ -300,7 +302,7 @@ func (suite *AutoConfigTestSuite) TestListenerRetry() { } listeners.Register("retry", retryFactory.make, ac.serviceListenerFactories) - configs := []config.Listeners{ + configs := []pkgconfigsetup.Listeners{ {Name: "noerr"}, {Name: "fail"}, {Name: "retry"}, diff --git a/comp/core/autodiscovery/autodiscoveryimpl/secrets.go b/comp/core/autodiscovery/autodiscoveryimpl/secrets.go index 68267d658848f..89b942fab6109 100644 --- a/comp/core/autodiscovery/autodiscoveryimpl/secrets.go +++ b/comp/core/autodiscovery/autodiscoveryimpl/secrets.go @@ -10,12 +10,12 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/secrets" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" 
"github.com/DataDog/datadog-agent/pkg/util/log" ) func decryptConfig(conf integration.Config, secretResolver secrets.Component) (integration.Config, error) { - if config.Datadog().GetBool("secret_backend_skip_checks") { + if pkgconfigsetup.Datadog().GetBool("secret_backend_skip_checks") { log.Tracef("'secret_backend_skip_checks' is enabled, not decrypting configuration %q", conf.Name) return conf, nil } diff --git a/comp/core/autodiscovery/common/utils/container_collect_all.go b/comp/core/autodiscovery/common/utils/container_collect_all.go index 9c040979459ca..08105f317841c 100644 --- a/comp/core/autodiscovery/common/utils/container_collect_all.go +++ b/comp/core/autodiscovery/common/utils/container_collect_all.go @@ -7,7 +7,7 @@ package utils import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // AddContainerCollectAllConfigs adds a config template containing an empty @@ -15,7 +15,7 @@ import ( // will be filtered out during config resolution if another config template // also has logs configuration. func AddContainerCollectAllConfigs(configs []integration.Config, adIdentifier string) []integration.Config { - if !config.Datadog().GetBool("logs_config.container_collect_all") { + if !pkgconfigsetup.Datadog().GetBool("logs_config.container_collect_all") { return configs } diff --git a/comp/core/autodiscovery/common/utils/pod_annotations.go b/comp/core/autodiscovery/common/utils/pod_annotations.go index d498245beb39f..2db5b0c323305 100644 --- a/comp/core/autodiscovery/common/utils/pod_annotations.go +++ b/comp/core/autodiscovery/common/utils/pod_annotations.go @@ -58,6 +58,7 @@ func parseChecksJSON(adIdentifier string, checksJSON string) ([]integration.Conf Name string `json:"name"` InitConfig json.RawMessage `json:"init_config"` Instances []interface{} `json:"instances"` + Logs json.RawMessage `json:"logs"` IgnoreAutodiscoveryTags bool `json:"ignore_autodiscovery_tags"` } @@ -83,6 +84,9 @@ func parseChecksJSON(adIdentifier string, checksJSON string) ([]integration.Conf IgnoreAutodiscoveryTags: config.IgnoreAutodiscoveryTags, } + if len(config.Logs) > 0 { + c.LogsConfig = integration.Data(config.Logs) + } for _, i := range config.Instances { instance, err := parseJSONObjToData(i) if err != nil { diff --git a/comp/core/autodiscovery/common/utils/pod_annotations_test.go b/comp/core/autodiscovery/common/utils/pod_annotations_test.go index a583e94c422ed..7043e7c19e301 100644 --- a/comp/core/autodiscovery/common/utils/pod_annotations_test.go +++ b/comp/core/autodiscovery/common/utils/pod_annotations_test.go @@ -428,6 +428,25 @@ func TestExtractTemplatesFromAnnotations(t *testing.T) { }, }, }, + { + name: "v2 annotations label logs", + annotations: map[string]string{ + "ad.datadoghq.com/foobar.checks": `{ + "apache": { + "logs": [{"service":"any_service","source":"any_source"}] + } + }`, + }, + adIdentifier: "foobar", + output: []integration.Config{ + { + Name: "apache", + LogsConfig: integration.Data("[{\"service\":\"any_service\",\"source\":\"any_source\"}]"), + ADIdentifiers: []string{adID}, + InitConfig: integration.Data("{}"), + }, + }, + }, } for _, tt := range tests { diff --git a/comp/core/autodiscovery/common/utils/prometheus.go b/comp/core/autodiscovery/common/utils/prometheus.go index ce5fb7091131b..8f8b76fba7d65 100644 --- a/comp/core/autodiscovery/common/utils/prometheus.go +++ b/comp/core/autodiscovery/common/utils/prometheus.go @@ -12,7 
+12,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -24,7 +24,7 @@ const ( // buildInstances generates check config instances based on the Prometheus config and the object annotations // The second returned value is true if more than one instance is found func buildInstances(pc *types.PrometheusCheck, annotations map[string]string, namespacedName string) ([]integration.Data, bool) { - openmetricsVersion := config.Datadog().GetInt("prometheus_scrape.version") + openmetricsVersion := pkgconfigsetup.Datadog().GetInt("prometheus_scrape.version") instances := []integration.Data{} for k, v := range pc.AD.KubeAnnotations.Incl { diff --git a/comp/core/autodiscovery/common/utils/prometheus_apiserver_test.go b/comp/core/autodiscovery/common/utils/prometheus_apiserver_test.go index 2844bb561d39b..3d3bb4a349e19 100644 --- a/comp/core/autodiscovery/common/utils/prometheus_apiserver_test.go +++ b/comp/core/autodiscovery/common/utils/prometheus_apiserver_test.go @@ -13,7 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -220,7 +220,7 @@ func TestConfigsForService(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - config.Datadog().SetWithoutSource("prometheus_scrape.version", tt.version) + pkgconfigsetup.Datadog().SetWithoutSource("prometheus_scrape.version", tt.version) assert.NoError(t, tt.check.Init(tt.version)) assert.ElementsMatch(t, tt.want, ConfigsForService(tt.check, tt.svc)) }) diff --git a/comp/core/autodiscovery/common/utils/prometheus_kubelet_test.go b/comp/core/autodiscovery/common/utils/prometheus_kubelet_test.go index 54983bd39e84d..d13b5919b6c69 100644 --- a/comp/core/autodiscovery/common/utils/prometheus_kubelet_test.go +++ b/comp/core/autodiscovery/common/utils/prometheus_kubelet_test.go @@ -13,7 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" "github.com/stretchr/testify/assert" @@ -515,7 +515,7 @@ func TestConfigsForPod(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - config.Datadog().SetWithoutSource("prometheus_scrape.version", tt.version) + pkgconfigsetup.Datadog().SetWithoutSource("prometheus_scrape.version", tt.version) tt.check.Init(tt.version) assert.ElementsMatch(t, tt.want, ConfigsForPod(tt.check, tt.pod)) }) diff --git a/comp/core/autodiscovery/component.go b/comp/core/autodiscovery/component.go index ac97e99b05744..28d74363ffedc 100644 --- a/comp/core/autodiscovery/component.go +++ b/comp/core/autodiscovery/component.go @@ -15,7 +15,7 @@ import ( 
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/scheduler" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // Component is the component type. @@ -26,7 +26,7 @@ type Component interface { ForceRanOnceFlag() HasRunOnce() bool GetAllConfigs() []integration.Config - AddListeners(listenerConfigs []config.Listeners) + AddListeners(listenerConfigs []pkgconfigsetup.Listeners) AddScheduler(name string, s scheduler.Scheduler, replayConfigs bool) RemoveScheduler(name string) MapOverLoadedConfigs(f func(map[string]integration.Config)) diff --git a/comp/core/autodiscovery/listeners/common.go b/comp/core/autodiscovery/listeners/common.go index e9f137895889a..07037631658fc 100644 --- a/comp/core/autodiscovery/listeners/common.go +++ b/comp/core/autodiscovery/listeners/common.go @@ -14,7 +14,7 @@ import ( "strconv" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -102,7 +102,7 @@ func (f *containerFilters) IsExcluded(filter containers.FilterType, annotations // getPrometheusIncludeAnnotations returns the Prometheus AD include annotations based on the Prometheus config func getPrometheusIncludeAnnotations() types.PrometheusAnnotations { annotations := types.PrometheusAnnotations{} - tmpConfigString := config.Datadog().GetString("prometheus_scrape.checks") + tmpConfigString := pkgconfigsetup.Datadog().GetString("prometheus_scrape.checks") var checks []*types.PrometheusCheck if len(tmpConfigString) > 0 { @@ -120,7 +120,7 @@ func getPrometheusIncludeAnnotations() types.PrometheusAnnotations { } for _, check := range checks { - if err := check.Init(config.Datadog().GetInt("prometheus_scrape.version")); err != nil { + if err := check.Init(pkgconfigsetup.Datadog().GetInt("prometheus_scrape.version")); err != nil { log.Errorf("Couldn't init check configuration: %v", err) continue } diff --git a/comp/core/autodiscovery/listeners/container.go b/comp/core/autodiscovery/listeners/container.go index f7afa5ec80cd5..6864d46e98019 100644 --- a/comp/core/autodiscovery/listeners/container.go +++ b/comp/core/autodiscovery/listeners/container.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/types" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" pkgcontainersimage "github.com/DataDog/datadog-agent/pkg/util/containers/image" "github.com/DataDog/datadog-agent/pkg/util/docker" @@ -89,7 +89,7 @@ func (l *ContainerListener) createContainerService(entity workloadmeta.Entity) { // stopped. 
if !container.State.Running && !container.State.FinishedAt.IsZero() { finishedAt := container.State.FinishedAt - excludeAge := time.Duration(config.Datadog().GetInt("container_exclude_stopped_age")) * time.Hour + excludeAge := time.Duration(pkgconfigsetup.Datadog().GetInt("container_exclude_stopped_age")) * time.Hour if time.Since(finishedAt) > excludeAge { log.Debugf("container %q not running for too long, skipping", container.ID) return diff --git a/comp/core/autodiscovery/listeners/kubelet.go b/comp/core/autodiscovery/listeners/kubelet.go index 187401ea1ead0..94094f358eec1 100644 --- a/comp/core/autodiscovery/listeners/kubelet.go +++ b/comp/core/autodiscovery/listeners/kubelet.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/common" "github.com/DataDog/datadog-agent/comp/core/tagger/types" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -136,7 +136,7 @@ func (l *KubeletListener) createContainerService( // stopped. if !container.State.Running && !container.State.FinishedAt.IsZero() { finishedAt := container.State.FinishedAt - excludeAge := time.Duration(config.Datadog().GetInt("container_exclude_stopped_age")) * time.Hour + excludeAge := time.Duration(pkgconfigsetup.Datadog().GetInt("container_exclude_stopped_age")) * time.Hour if time.Since(finishedAt) > excludeAge { log.Debugf("container %q not running for too long, skipping", container.ID) return diff --git a/comp/core/autodiscovery/listeners/service.go b/comp/core/autodiscovery/listeners/service.go index f8aada6167d1d..841af2456caa8 100644 --- a/comp/core/autodiscovery/listeners/service.go +++ b/comp/core/autodiscovery/listeners/service.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -172,7 +172,7 @@ func (s *service) filterTemplatesOverriddenChecks(configs map[string]integration // added by the config provider (AddContainerCollectAllConfigs) if the service // has any other templates containing logs config. 
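A hedged sketch of the stopped-container exclusion that both the container and kubelet listeners above now read through pkgconfigsetup.Datadog(). The helper name and package are illustrative only; the setting name and the age check come from the hunks above.

package example

import (
	"time"

	pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
)

// shouldSkipStoppedContainer mirrors createContainerService above: a container
// that is no longer running and finished longer ago than
// container_exclude_stopped_age (in hours) is not turned into a service.
func shouldSkipStoppedContainer(running bool, finishedAt time.Time) bool {
	if running || finishedAt.IsZero() {
		return false
	}
	excludeAge := time.Duration(pkgconfigsetup.Datadog().GetInt("container_exclude_stopped_age")) * time.Hour
	return time.Since(finishedAt) > excludeAge
}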
func (s *service) filterTemplatesContainerCollectAll(configs map[string]integration.Config) { - if !config.Datadog().GetBool("logs_config.container_collect_all") { + if !pkgconfigsetup.Datadog().GetBool("logs_config.container_collect_all") { return } diff --git a/comp/core/autodiscovery/listeners/snmp_test.go b/comp/core/autodiscovery/listeners/snmp_test.go index 55a12eafb101c..0e63b1a2e1376 100644 --- a/comp/core/autodiscovery/listeners/snmp_test.go +++ b/comp/core/autodiscovery/listeners/snmp_test.go @@ -10,8 +10,8 @@ import ( "strconv" "testing" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/snmp" "github.com/DataDog/datadog-agent/pkg/snmp/snmpintegration" @@ -43,7 +43,7 @@ func TestSNMPListener(t *testing.T) { } } - l, err := NewSNMPListener(&config.Listeners{}, nil) + l, err := NewSNMPListener(&pkgconfigsetup.Listeners{}, nil) assert.Equal(t, nil, err) l.Listen(newSvc, delSvc) @@ -142,7 +142,7 @@ func TestSNMPListenerIgnoredAdresses(t *testing.T) { } } - l, err := NewSNMPListener(&config.Listeners{}, nil) + l, err := NewSNMPListener(&pkgconfigsetup.Listeners{}, nil) assert.Equal(t, nil, err) l.Listen(newSvc, delSvc) diff --git a/comp/core/autodiscovery/listeners/staticconfig.go b/comp/core/autodiscovery/listeners/staticconfig.go index 9ff27685b9a69..0e537c49bf189 100644 --- a/comp/core/autodiscovery/listeners/staticconfig.go +++ b/comp/core/autodiscovery/listeners/staticconfig.go @@ -10,7 +10,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" ) @@ -51,7 +51,7 @@ func (l *StaticConfigListener) createServices() { "container_lifecycle", "sbom", } { - if enabled := config.Datadog().GetBool(staticCheck + ".enabled"); enabled { + if enabled := pkgconfigsetup.Datadog().GetBool(staticCheck + ".enabled"); enabled { l.newService <- &StaticConfigService{adIdentifier: "_" + staticCheck} } } diff --git a/comp/core/autodiscovery/providers/cloudfoundry.go b/comp/core/autodiscovery/providers/cloudfoundry.go index 3bfdd72d071da..0c6fb89c96d4a 100644 --- a/comp/core/autodiscovery/providers/cloudfoundry.go +++ b/comp/core/autodiscovery/providers/cloudfoundry.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/cloudfoundry" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -34,7 +34,7 @@ type CloudFoundryConfigProvider struct { } // NewCloudFoundryConfigProvider instantiates a new CloudFoundryConfigProvider from given config -func NewCloudFoundryConfigProvider(*config.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { +func NewCloudFoundryConfigProvider(*pkgconfigsetup.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { cfp := CloudFoundryConfigProvider{ lastCollected: time.Now(), } diff --git a/comp/core/autodiscovery/providers/cloudfoundry_nop.go 
b/comp/core/autodiscovery/providers/cloudfoundry_nop.go index 232dd534bd940..60894f3747d55 100644 --- a/comp/core/autodiscovery/providers/cloudfoundry_nop.go +++ b/comp/core/autodiscovery/providers/cloudfoundry_nop.go @@ -9,8 +9,8 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewCloudFoundryConfigProvider instantiates a new CloudFoundryConfigProvider from given config -var NewCloudFoundryConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewCloudFoundryConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/clusterchecks.go b/comp/core/autodiscovery/providers/clusterchecks.go index d3c98c3a959f1..e8b3678abf170 100644 --- a/comp/core/autodiscovery/providers/clusterchecks.go +++ b/comp/core/autodiscovery/providers/clusterchecks.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ddErrors "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/clusteragent" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -42,9 +42,9 @@ type ClusterChecksConfigProvider struct { // NewClusterChecksConfigProvider returns a new ConfigProvider collecting // cluster check configurations from the cluster-agent. // Connectivity is not checked at this stage to allow for retries, Collect will do it. 
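The provider constructors in this file and the following ones all move to the same signature, with the provider config type now coming from pkg/config/setup. A minimal sketch of that shape, written as if it lived in package providers; the constructor and struct names are hypothetical, and a real provider would return a value implementing ConfigProvider instead of nil.

package providers

import (
	"github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry"
	pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
)

// NewExampleConfigProvider follows the post-migration constructor signature
// used by the providers in this patch.
func NewExampleConfigProvider(providerConfig *pkgconfigsetup.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) {
	if providerConfig == nil {
		// Default to an empty provider config, as NewClusterChecksConfigProvider does below.
		providerConfig = &pkgconfigsetup.ConfigurationProviders{}
	}
	_ = providerConfig
	return nil, nil
}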
-func NewClusterChecksConfigProvider(providerConfig *config.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { +func NewClusterChecksConfigProvider(providerConfig *pkgconfigsetup.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { if providerConfig == nil { - providerConfig = &config.ConfigurationProviders{} + providerConfig = &pkgconfigsetup.ConfigurationProviders{} } c := &ClusterChecksConfigProvider{ @@ -52,11 +52,11 @@ func NewClusterChecksConfigProvider(providerConfig *config.ConfigurationProvider degradedDuration: defaultDegradedDeadline, } - c.identifier = config.Datadog().GetString("clc_runner_id") + c.identifier = pkgconfigsetup.Datadog().GetString("clc_runner_id") if c.identifier == "" { c.identifier, _ = hostname.Get(context.TODO()) - if config.Datadog().GetBool("cloud_foundry") { - boshID := config.Datadog().GetString("bosh_id") + if pkgconfigsetup.Datadog().GetBool("cloud_foundry") { + boshID := pkgconfigsetup.Datadog().GetString("bosh_id") if boshID == "" { log.Warn("configuration variable cloud_foundry is set to true, but bosh_id is empty, can't retrieve node name") } else { @@ -178,7 +178,7 @@ func (c *ClusterChecksConfigProvider) Collect(ctx context.Context) ([]integratio // This usually happens when scheduling a lot of checks on a CLC, especially larger checks // with `Configure()` implemented, like KSM Core and Orchestrator checks func (c *ClusterChecksConfigProvider) heartbeatSender(ctx context.Context) { - expirationTimeout := time.Duration(config.Datadog().GetInt("cluster_checks.node_expiration_timeout")) * time.Second + expirationTimeout := time.Duration(pkgconfigsetup.Datadog().GetInt("cluster_checks.node_expiration_timeout")) * time.Second heartTicker := time.NewTicker(time.Second) defer heartTicker.Stop() diff --git a/comp/core/autodiscovery/providers/config_reader.go b/comp/core/autodiscovery/providers/config_reader.go index 8b4621d0f9866..9f7b8cd13f8fd 100644 --- a/comp/core/autodiscovery/providers/config_reader.go +++ b/comp/core/autodiscovery/providers/config_reader.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/configresolver" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/fargate" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -67,11 +67,11 @@ var doOnce sync.Once // InitConfigFilesReader should be called at agent startup. func InitConfigFilesReader(paths []string) { fileCacheExpiration := 5 * time.Minute - if config.Datadog().GetBool("autoconf_config_files_poll") { + if pkgconfigsetup.Datadog().GetBool("autoconf_config_files_poll") { // Removing some time (1s) to avoid races with polling interval. // If cache expiration is set to be == ticker interval the cache may be used if t1B (cache read time) - t0B (ticker time) < t1A (cache store time) - t0A (ticker time). // Which is likely to be the case because the code path on a cache write is slower. 
- configExpSeconds := config.Datadog().GetInt("autoconf_config_files_poll_interval") - 1 + configExpSeconds := pkgconfigsetup.Datadog().GetInt("autoconf_config_files_poll_interval") - 1 // If we are below < 1, cache is basically disabled, we cannot put 0 as it's considered no expiration by cache.Cache if configExpSeconds < 1 { fileCacheExpiration = time.Nanosecond @@ -243,7 +243,7 @@ func collectEntry(file os.DirEntry, path string, integrationName string, integra absPath := filepath.Join(path, fileName) // skip auto conf files based on the agent configuration - if fileName == "auto_conf.yaml" && containsString(config.Datadog().GetStringSlice("ignore_autoconf"), integrationName) { + if fileName == "auto_conf.yaml" && containsString(pkgconfigsetup.Datadog().GetStringSlice("ignore_autoconf"), integrationName) { log.Infof("Skipping 'auto_conf.yaml' for integration '%s'", integrationName) entry.err = fmt.Errorf("'auto_conf.yaml' for integration '%s' is skipped", integrationName) return entry, integrationErrors @@ -398,7 +398,7 @@ func GetIntegrationConfigFromFile(name, fpath string) (integration.Config, error if fargate.IsFargateInstance() { // In Fargate, since no host tags are applied in the backend, // add the configured DD_TAGS/DD_EXTRA_TAGS to the instance tags. - tags := configUtils.GetConfiguredTags(config.Datadog(), false) + tags := configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), false) err := dataConf.MergeAdditionalTags(tags) if err != nil { log.Debugf("Could not add agent-level tags to instance of %v: %v", fpath, err) diff --git a/comp/core/autodiscovery/providers/consul.go b/comp/core/autodiscovery/providers/consul.go index 04957065793ff..107694e975c59 100644 --- a/comp/core/autodiscovery/providers/consul.go +++ b/comp/core/autodiscovery/providers/consul.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -53,9 +53,9 @@ type ConsulConfigProvider struct { } // NewConsulConfigProvider creates a client connection to consul and create a new ConsulConfigProvider -func NewConsulConfigProvider(providerConfig *config.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { +func NewConsulConfigProvider(providerConfig *pkgconfigsetup.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { if providerConfig == nil { - providerConfig = &config.ConfigurationProviders{} + providerConfig = &pkgconfigsetup.ConfigurationProviders{} } consulURL, err := url.Parse(providerConfig.TemplateURL) diff --git a/comp/core/autodiscovery/providers/consul_nop.go b/comp/core/autodiscovery/providers/consul_nop.go index 5a7272f3bc3c1..c38fdff9974cd 100644 --- a/comp/core/autodiscovery/providers/consul_nop.go +++ b/comp/core/autodiscovery/providers/consul_nop.go @@ -9,8 +9,8 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewConsulConfigProvider creates a client connection to consul and create a new ConsulConfigProvider -var NewConsulConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) 
+var NewConsulConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/container.go b/comp/core/autodiscovery/providers/container.go index 7577fc2f89e03..846d967682b60 100644 --- a/comp/core/autodiscovery/providers/container.go +++ b/comp/core/autodiscovery/providers/container.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -34,7 +34,7 @@ type ContainerConfigProvider struct { // NewContainerConfigProvider returns a new ConfigProvider subscribed to both container // and pods -func NewContainerConfigProvider(_ *config.ConfigurationProviders, wmeta workloadmeta.Component, telemetryStore *telemetry.Store) (ConfigProvider, error) { +func NewContainerConfigProvider(_ *pkgconfigsetup.ConfigurationProviders, wmeta workloadmeta.Component, telemetryStore *telemetry.Store) (ConfigProvider, error) { return &ContainerConfigProvider{ workloadmetaStore: wmeta, configCache: make(map[string]map[string]integration.Config), diff --git a/comp/core/autodiscovery/providers/endpointschecks.go b/comp/core/autodiscovery/providers/endpointschecks.go index e6a55897c84cb..38a09c6c84006 100644 --- a/comp/core/autodiscovery/providers/endpointschecks.go +++ b/comp/core/autodiscovery/providers/endpointschecks.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/clusteragent" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" @@ -35,7 +35,7 @@ type EndpointsChecksConfigProvider struct { // NewEndpointsChecksConfigProvider returns a new ConfigProvider collecting // endpoints check configurations from the cluster-agent. // Connectivity is not checked at this stage to allow for retries, Collect will do it. -func NewEndpointsChecksConfigProvider(providerConfig *config.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { +func NewEndpointsChecksConfigProvider(providerConfig *pkgconfigsetup.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { c := &EndpointsChecksConfigProvider{ degradedDuration: defaultDegradedDeadline, } @@ -110,8 +110,8 @@ func (c *EndpointsChecksConfigProvider) Collect(ctx context.Context) ([]integrat // getNodename retrieves current node name from kubelet (if running on Kubernetes) // or bosh ID of current node (if running on Cloud Foundry). 
func getNodename(ctx context.Context) (string, error) { - if config.Datadog().GetBool("cloud_foundry") { - boshID := config.Datadog().GetString("bosh_id") + if pkgconfigsetup.Datadog().GetBool("cloud_foundry") { + boshID := pkgconfigsetup.Datadog().GetString("bosh_id") if boshID == "" { return "", fmt.Errorf("configuration variable cloud_foundry is set to true, but bosh_id is empty, can't retrieve node name") } diff --git a/comp/core/autodiscovery/providers/endpointschecks_nop.go b/comp/core/autodiscovery/providers/endpointschecks_nop.go index 6dc6386a834f9..3af1df32d23d1 100644 --- a/comp/core/autodiscovery/providers/endpointschecks_nop.go +++ b/comp/core/autodiscovery/providers/endpointschecks_nop.go @@ -9,10 +9,10 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewEndpointsChecksConfigProvider returns a new ConfigProvider collecting // endpoints check configurations from the cluster-agent. // Connectivity is not checked at this stage to allow for retries, Collect will do it. -var NewEndpointsChecksConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewEndpointsChecksConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/etcd.go b/comp/core/autodiscovery/providers/etcd.go index 58178a0b6065d..bb08f9306f9d4 100644 --- a/comp/core/autodiscovery/providers/etcd.go +++ b/comp/core/autodiscovery/providers/etcd.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -37,9 +37,9 @@ type EtcdConfigProvider struct { } // NewEtcdConfigProvider creates a client connection to etcd and create a new EtcdConfigProvider -func NewEtcdConfigProvider(providerConfig *config.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { +func NewEtcdConfigProvider(providerConfig *pkgconfigsetup.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { if providerConfig == nil { - providerConfig = &config.ConfigurationProviders{} + providerConfig = &pkgconfigsetup.ConfigurationProviders{} } clientCfg := client.Config{ diff --git a/comp/core/autodiscovery/providers/etcd_nop.go b/comp/core/autodiscovery/providers/etcd_nop.go index b3e1f8675f8be..bee94b3beb8aa 100644 --- a/comp/core/autodiscovery/providers/etcd_nop.go +++ b/comp/core/autodiscovery/providers/etcd_nop.go @@ -9,8 +9,8 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewEtcdConfigProvider creates a client connection to etcd and create a new EtcdConfigProvider -var NewEtcdConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewEtcdConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git 
a/comp/core/autodiscovery/providers/file_test.go b/comp/core/autodiscovery/providers/file_test.go index eaed5fc0dfa39..021487c1ffc54 100644 --- a/comp/core/autodiscovery/providers/file_test.go +++ b/comp/core/autodiscovery/providers/file_test.go @@ -14,7 +14,7 @@ import ( acTelemetry "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/stretchr/testify/assert" @@ -22,7 +22,7 @@ import ( func TestCollect(t *testing.T) { ctx := context.Background() - config.Datadog().SetWithoutSource("ignore_autoconf", []string{"ignored"}) + pkgconfigsetup.Datadog().SetWithoutSource("ignore_autoconf", []string{"ignored"}) paths := []string{"tests", "foo/bar"} telemetry := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) diff --git a/comp/core/autodiscovery/providers/kube_endpoints.go b/comp/core/autodiscovery/providers/kube_endpoints.go index a5263d4be9fe9..1f8b72aa7b226 100644 --- a/comp/core/autodiscovery/providers/kube_endpoints.go +++ b/comp/core/autodiscovery/providers/kube_endpoints.go @@ -22,7 +22,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -59,7 +60,7 @@ type configInfo struct { // NewKubeEndpointsConfigProvider returns a new ConfigProvider connected to apiserver. // Connectivity is not checked at this stage to allow for retries, Collect will do it. -func NewKubeEndpointsConfigProvider(_ *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) { +func NewKubeEndpointsConfigProvider(_ *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) { // Using GetAPIClient (no wait) as Client should already be initialized by Cluster Agent main entrypoint before ac, err := apiserver.GetAPIClient() if err != nil { @@ -99,7 +100,7 @@ func NewKubeEndpointsConfigProvider(_ *config.ConfigurationProviders, telemetryS return nil, fmt.Errorf("cannot add event handler to endpoint informer: %s", err) } - if config.Datadog().GetBool("cluster_checks.support_hybrid_ignore_ad_tags") { + if pkgconfigsetup.Datadog().GetBool("cluster_checks.support_hybrid_ignore_ad_tags") { log.Warnf("The `cluster_checks.support_hybrid_ignore_ad_tags` flag is" + " deprecated and will be removed in a future version. 
Please replace " + "`ad.datadoghq.com/endpoints.ignore_autodiscovery_tags` in your service annotations" + @@ -123,7 +124,7 @@ func (k *kubeEndpointsConfigProvider) Collect(context.Context) ([]integration.Co k.setUpToDate(true) var generatedConfigs []integration.Config - parsedConfigsInfo := k.parseServiceAnnotationsForEndpoints(services, config.Datadog()) + parsedConfigsInfo := k.parseServiceAnnotationsForEndpoints(services, pkgconfigsetup.Datadog()) for _, conf := range parsedConfigsInfo { kep, err := k.endpointsLister.Endpoints(conf.namespace).Get(conf.name) if err != nil { @@ -230,7 +231,7 @@ func (k *kubeEndpointsConfigProvider) setUpToDate(v bool) { k.upToDate = v } -func (k *kubeEndpointsConfigProvider) parseServiceAnnotationsForEndpoints(services []*v1.Service, cfg config.Config) []configInfo { +func (k *kubeEndpointsConfigProvider) parseServiceAnnotationsForEndpoints(services []*v1.Service, cfg model.Config) []configInfo { var configsInfo []configInfo setEndpointIDs := map[string]struct{}{} diff --git a/comp/core/autodiscovery/providers/kube_endpoints_file.go b/comp/core/autodiscovery/providers/kube_endpoints_file.go index 67b19d1d600c8..c9d1ceb568800 100644 --- a/comp/core/autodiscovery/providers/kube_endpoints_file.go +++ b/comp/core/autodiscovery/providers/kube_endpoints_file.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -57,7 +57,7 @@ type KubeEndpointsFileConfigProvider struct { } // NewKubeEndpointsFileConfigProvider returns a new KubeEndpointsFileConfigProvider -func NewKubeEndpointsFileConfigProvider(*config.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { +func NewKubeEndpointsFileConfigProvider(*pkgconfigsetup.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { templates, _, err := ReadConfigFiles(WithAdvancedADOnly) if err != nil { return nil, err diff --git a/comp/core/autodiscovery/providers/kube_endpoints_file_nop.go b/comp/core/autodiscovery/providers/kube_endpoints_file_nop.go index a2e5b1ef58239..4a5b59aa8c4df 100644 --- a/comp/core/autodiscovery/providers/kube_endpoints_file_nop.go +++ b/comp/core/autodiscovery/providers/kube_endpoints_file_nop.go @@ -9,8 +9,8 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewKubeEndpointsFileConfigProvider returns a new KubeEndpointsFileConfigProvider -var NewKubeEndpointsFileConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewKubeEndpointsFileConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/kube_endpoints_nop.go b/comp/core/autodiscovery/providers/kube_endpoints_nop.go index a6004ed3cf91a..b17ed36295ea6 100644 --- a/comp/core/autodiscovery/providers/kube_endpoints_nop.go +++ b/comp/core/autodiscovery/providers/kube_endpoints_nop.go @@ -9,9 +9,9 @@ package providers import ( 
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewKubeEndpointsConfigProvider returns a new ConfigProvider connected to apiserver. // Connectivity is not checked at this stage to allow for retries, Collect will do it. -var NewKubeEndpointsConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewKubeEndpointsConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/kube_endpoints_test.go b/comp/core/autodiscovery/providers/kube_endpoints_test.go index 3ea35201e26a0..d3891c5a65da0 100644 --- a/comp/core/autodiscovery/providers/kube_endpoints_test.go +++ b/comp/core/autodiscovery/providers/kube_endpoints_test.go @@ -26,7 +26,7 @@ import ( acTelemetry "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" @@ -271,7 +271,7 @@ func TestParseKubeServiceAnnotationsForEndpoints(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - cfg := config.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + cfg := model.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) if tc.hybrid { cfg.SetWithoutSource("cluster_checks.support_hybrid_ignore_ad_tags", true) } diff --git a/comp/core/autodiscovery/providers/kube_services.go b/comp/core/autodiscovery/providers/kube_services.go index 53f9cf65aec98..6cb325916697a 100644 --- a/comp/core/autodiscovery/providers/kube_services.go +++ b/comp/core/autodiscovery/providers/kube_services.go @@ -21,7 +21,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -41,7 +42,7 @@ type KubeServiceConfigProvider struct { // NewKubeServiceConfigProvider returns a new ConfigProvider connected to apiserver. // Connectivity is not checked at this stage to allow for retries, Collect will do it. 
-func NewKubeServiceConfigProvider(_ *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) { +func NewKubeServiceConfigProvider(_ *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) { // Using GetAPIClient() (no retry) ac, err := apiserver.GetAPIClient() if err != nil { @@ -67,7 +68,7 @@ func NewKubeServiceConfigProvider(_ *config.ConfigurationProviders, telemetrySto return nil, fmt.Errorf("cannot add event handler to services informer: %s", err) } - if config.Datadog().GetBool("cluster_checks.support_hybrid_ignore_ad_tags") { + if pkgconfigsetup.Datadog().GetBool("cluster_checks.support_hybrid_ignore_ad_tags") { log.Warnf("The `cluster_checks.support_hybrid_ignore_ad_tags` flag is" + " deprecated and will be removed in a future version. Please replace " + "`ad.datadoghq.com/service.ignore_autodiscovery_tags` in your service annotations" + @@ -92,7 +93,7 @@ func (k *KubeServiceConfigProvider) Collect(ctx context.Context) ([]integration. } k.upToDate = true - return k.parseServiceAnnotations(services, config.Datadog()) + return k.parseServiceAnnotations(services, pkgconfigsetup.Datadog()) } // IsUpToDate allows to cache configs as long as no changes are detected in the apiserver @@ -162,7 +163,7 @@ func valuesDiffer(first, second map[string]string, prefix string) bool { return matchingInFirst != matchingInSecond } -func (k *KubeServiceConfigProvider) parseServiceAnnotations(services []*v1.Service, ddConf config.Config) ([]integration.Config, error) { +func (k *KubeServiceConfigProvider) parseServiceAnnotations(services []*v1.Service, ddConf model.Config) ([]integration.Config, error) { var configs []integration.Config setServiceIDs := map[string]struct{}{} diff --git a/comp/core/autodiscovery/providers/kube_services_file.go b/comp/core/autodiscovery/providers/kube_services_file.go index 721ed15e5d79a..15bcccde50a28 100644 --- a/comp/core/autodiscovery/providers/kube_services_file.go +++ b/comp/core/autodiscovery/providers/kube_services_file.go @@ -13,7 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" ) @@ -22,7 +22,7 @@ type KubeServiceFileConfigProvider struct { } // NewKubeServiceFileConfigProvider returns a new KubeServiceFileConfigProvider -func NewKubeServiceFileConfigProvider(*config.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { +func NewKubeServiceFileConfigProvider(*pkgconfigsetup.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { return &KubeServiceFileConfigProvider{}, nil } diff --git a/comp/core/autodiscovery/providers/kube_services_file_nop.go b/comp/core/autodiscovery/providers/kube_services_file_nop.go index 8724b87313061..a5c5db3a700f2 100644 --- a/comp/core/autodiscovery/providers/kube_services_file_nop.go +++ b/comp/core/autodiscovery/providers/kube_services_file_nop.go @@ -9,8 +9,8 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewKubeServiceFileConfigProvider returns a new KubeServiceFileConfigProvider -var 
NewKubeServiceFileConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewKubeServiceFileConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/kube_services_nop.go b/comp/core/autodiscovery/providers/kube_services_nop.go index 4f64323cb985c..f1941c07a7373 100644 --- a/comp/core/autodiscovery/providers/kube_services_nop.go +++ b/comp/core/autodiscovery/providers/kube_services_nop.go @@ -9,9 +9,9 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewKubeServiceConfigProvider returns a new ConfigProvider connected to apiserver. // Connectivity is not checked at this stage to allow for retries, Collect will do it. -var NewKubeServiceConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewKubeServiceConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/kube_services_test.go b/comp/core/autodiscovery/providers/kube_services_test.go index 57da4676c08f1..1d395d937c163 100644 --- a/comp/core/autodiscovery/providers/kube_services_test.go +++ b/comp/core/autodiscovery/providers/kube_services_test.go @@ -26,7 +26,7 @@ import ( acTelemetry "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" ) @@ -241,7 +241,7 @@ func TestParseKubeServiceAnnotations(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - cfg := config.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + cfg := model.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) if tc.hybrid { cfg.SetWithoutSource("cluster_checks.support_hybrid_ignore_ad_tags", true) } diff --git a/comp/core/autodiscovery/providers/prometheus_common.go b/comp/core/autodiscovery/providers/prometheus_common.go index 1d75a2f1c65ac..a75657f578e17 100644 --- a/comp/core/autodiscovery/providers/prometheus_common.go +++ b/comp/core/autodiscovery/providers/prometheus_common.go @@ -7,14 +7,14 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) // getPrometheusConfigs reads and initializes the openmetrics checks from the configuration // It defines a default openmetrics instances with default AD if the checks configuration is empty func getPrometheusConfigs() ([]*types.PrometheusCheck, error) { - checks, err := types.PrometheusScrapeChecksTransformer(config.Datadog().GetString("prometheus_scrape.checks")) + checks, err := types.PrometheusScrapeChecksTransformer(pkgconfigsetup.Datadog().GetString("prometheus_scrape.checks")) if err != nil { return []*types.PrometheusCheck{}, err } @@ -26,7 +26,7 @@ func getPrometheusConfigs() 
([]*types.PrometheusCheck, error) { validChecks := []*types.PrometheusCheck{} for i, check := range checks { - if err := check.Init(config.Datadog().GetInt("prometheus_scrape.version")); err != nil { + if err := check.Init(pkgconfigsetup.Datadog().GetInt("prometheus_scrape.version")); err != nil { log.Errorf("Ignoring check configuration (# %d): %v", i+1, err) continue } diff --git a/comp/core/autodiscovery/providers/prometheus_common_test.go b/comp/core/autodiscovery/providers/prometheus_common_test.go index 7611ee95faecf..bd610c5753168 100644 --- a/comp/core/autodiscovery/providers/prometheus_common_test.go +++ b/comp/core/autodiscovery/providers/prometheus_common_test.go @@ -11,7 +11,7 @@ import ( "testing" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/stretchr/testify/assert" ) @@ -198,7 +198,7 @@ func TestGetPrometheusConfigs(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { confBytes, _ := json.Marshal(tt.config) - config.Datadog().SetWithoutSource("prometheus_scrape.checks", string(confBytes)) + pkgconfigsetup.Datadog().SetWithoutSource("prometheus_scrape.checks", string(confBytes)) checks, err := getPrometheusConfigs() if (err != nil) != tt.wantErr { t.Errorf("getPrometheusConfigs() error = %v, wantErr %v", err, tt.wantErr) diff --git a/comp/core/autodiscovery/providers/prometheus_pods.go b/comp/core/autodiscovery/providers/prometheus_pods.go index 37634a8dbade8..fe8ede52eaacc 100644 --- a/comp/core/autodiscovery/providers/prometheus_pods.go +++ b/comp/core/autodiscovery/providers/prometheus_pods.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" ) @@ -28,7 +28,7 @@ type PrometheusPodsConfigProvider struct { // NewPrometheusPodsConfigProvider returns a new Prometheus ConfigProvider connected to kubelet. // Connectivity is not checked at this stage to allow for retries, Collect will do it. -func NewPrometheusPodsConfigProvider(*config.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { +func NewPrometheusPodsConfigProvider(*pkgconfigsetup.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { checks, err := getPrometheusConfigs() if err != nil { return nil, err diff --git a/comp/core/autodiscovery/providers/prometheus_pods_nop.go b/comp/core/autodiscovery/providers/prometheus_pods_nop.go index e6ce37bbc9a27..3bf1ad0095f1d 100644 --- a/comp/core/autodiscovery/providers/prometheus_pods_nop.go +++ b/comp/core/autodiscovery/providers/prometheus_pods_nop.go @@ -9,9 +9,9 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewPrometheusPodsConfigProvider returns a new Prometheus ConfigProvider connected to kubelet. // Connectivity is not checked at this stage to allow for retries, Collect will do it. 
-var NewPrometheusPodsConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewPrometheusPodsConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/prometheus_services.go b/comp/core/autodiscovery/providers/prometheus_services.go index 72d5a33b28326..7bc52e5f1f4ec 100644 --- a/comp/core/autodiscovery/providers/prometheus_services.go +++ b/comp/core/autodiscovery/providers/prometheus_services.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -66,7 +66,7 @@ type PrometheusServicesConfigProvider struct { } // NewPrometheusServicesConfigProvider returns a new Prometheus ConfigProvider connected to kube apiserver -func NewPrometheusServicesConfigProvider(*config.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { +func NewPrometheusServicesConfigProvider(*pkgconfigsetup.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { // Using GetAPIClient (no wait) as Client should already be initialized by Cluster Agent main entrypoint before ac, err := apiserver.GetAPIClient() if err != nil { @@ -81,7 +81,7 @@ func NewPrometheusServicesConfigProvider(*config.ConfigurationProviders, *teleme var endpointsInformer infov1.EndpointsInformer var endpointsLister listersv1.EndpointsLister - collectEndpoints := config.Datadog().GetBool("prometheus_scrape.service_endpoints") + collectEndpoints := pkgconfigsetup.Datadog().GetBool("prometheus_scrape.service_endpoints") if collectEndpoints { endpointsInformer = ac.InformerFactory.Core().V1().Endpoints() if endpointsInformer == nil { diff --git a/comp/core/autodiscovery/providers/prometheus_services_nop.go b/comp/core/autodiscovery/providers/prometheus_services_nop.go index 945daf2c80eee..eddd3ab680cae 100644 --- a/comp/core/autodiscovery/providers/prometheus_services_nop.go +++ b/comp/core/autodiscovery/providers/prometheus_services_nop.go @@ -9,8 +9,8 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewPrometheusServicesConfigProvider returns a new Prometheus ConfigProvider connected to kube apiserver -var NewPrometheusServicesConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewPrometheusServicesConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/prometheus_services_test.go b/comp/core/autodiscovery/providers/prometheus_services_test.go index bc8b9a99b9d1d..9fbb2fd47b7f6 100644 --- a/comp/core/autodiscovery/providers/prometheus_services_test.go +++ b/comp/core/autodiscovery/providers/prometheus_services_test.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types" 
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -259,7 +259,7 @@ func TestPrometheusServicesCollect(t *testing.T) { }, } - config.Datadog().SetWithoutSource("prometheus_scrape.version", 2) + pkgconfigsetup.Datadog().SetWithoutSource("prometheus_scrape.version", 2) for _, test := range tests { t.Run(test.name, func(t *testing.T) { ctx := context.Background() diff --git a/comp/core/autodiscovery/providers/providers.go b/comp/core/autodiscovery/providers/providers.go index 9336bb21d74fc..5eb4b3b2cf853 100644 --- a/comp/core/autodiscovery/providers/providers.go +++ b/comp/core/autodiscovery/providers/providers.go @@ -12,17 +12,17 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) // RegisterProvider adds a loader to the providers catalog func RegisterProvider(name string, - factory func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error), + factory func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error), providerCatalog map[string]ConfigProviderFactory) { RegisterProviderWithComponents( name, - func(providerConfig *config.ConfigurationProviders, _ workloadmeta.Component, telemetryStore *telemetry.Store) (ConfigProvider, error) { + func(providerConfig *pkgconfigsetup.ConfigurationProviders, _ workloadmeta.Component, telemetryStore *telemetry.Store) (ConfigProvider, error) { return factory(providerConfig, telemetryStore) }, providerCatalog, @@ -61,7 +61,7 @@ func RegisterProviders(providerCatalog map[string]ConfigProviderFactory) { } // ConfigProviderFactory is any function capable to create a ConfigProvider instance -type ConfigProviderFactory func(providerConfig *config.ConfigurationProviders, wmeta workloadmeta.Component, telemetryStore *telemetry.Store) (ConfigProvider, error) +type ConfigProviderFactory func(providerConfig *pkgconfigsetup.ConfigurationProviders, wmeta workloadmeta.Component, telemetryStore *telemetry.Store) (ConfigProvider, error) // ConfigProvider represents a source of `integration.Config` values // that can either be applied immediately or resolved for a service and diff --git a/comp/core/autodiscovery/providers/remote_config.go b/comp/core/autodiscovery/providers/remote_config.go index 19dd0907e4e23..24b00bbd68b33 100644 --- a/comp/core/autodiscovery/providers/remote_config.go +++ b/comp/core/autodiscovery/providers/remote_config.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -96,7 +96,7 @@ func (rc *RemoteConfigProvider) IntegrationScheduleCallback(updates map[string]s defer rc.mu.Unlock() var err error - allowedIntegration := 
config.GetRemoteConfigurationAllowedIntegrations(config.Datadog()) + allowedIntegration := pkgconfigsetup.GetRemoteConfigurationAllowedIntegrations(pkgconfigsetup.Datadog()) newCache := make(map[string]integration.Config, 0) // Now schedule everything diff --git a/comp/core/autodiscovery/providers/utils.go b/comp/core/autodiscovery/providers/utils.go index cf637776c825e..1abdc10007028 100644 --- a/comp/core/autodiscovery/providers/utils.go +++ b/comp/core/autodiscovery/providers/utils.go @@ -10,7 +10,7 @@ import ( "strconv" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) const ( @@ -26,20 +26,20 @@ const ( ) func buildStoreKey(key ...string) string { - parts := []string{config.Datadog().GetString("autoconf_template_dir")} + parts := []string{pkgconfigsetup.Datadog().GetString("autoconf_template_dir")} parts = append(parts, key...) return path.Join(parts...) } // GetPollInterval computes the poll interval from the config -func GetPollInterval(cp config.ConfigurationProviders) time.Duration { +func GetPollInterval(cp pkgconfigsetup.ConfigurationProviders) time.Duration { if cp.PollInterval != "" { customInterval, err := time.ParseDuration(cp.PollInterval) if err == nil { return customInterval } } - return config.Datadog().GetDuration("ad_config_poll_interval") * time.Second + return pkgconfigsetup.Datadog().GetDuration("ad_config_poll_interval") * time.Second } // providerCache supports monitoring a service for changes either to the number diff --git a/comp/core/autodiscovery/providers/utils_test.go b/comp/core/autodiscovery/providers/utils_test.go index 23fc5fc9d112a..8357cc46e8c4d 100644 --- a/comp/core/autodiscovery/providers/utils_test.go +++ b/comp/core/autodiscovery/providers/utils_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestBuildStoreKey(t *testing.T) { @@ -28,13 +28,13 @@ func TestBuildStoreKey(t *testing.T) { } func TestGetPollInterval(t *testing.T) { - cp := config.ConfigurationProviders{} + cp := pkgconfigsetup.ConfigurationProviders{} assert.Equal(t, GetPollInterval(cp), 10*time.Second) - cp = config.ConfigurationProviders{ + cp = pkgconfigsetup.ConfigurationProviders{ PollInterval: "foo", } assert.Equal(t, GetPollInterval(cp), 10*time.Second) - cp = config.ConfigurationProviders{ + cp = pkgconfigsetup.ConfigurationProviders{ PollInterval: "1s", } assert.Equal(t, GetPollInterval(cp), 1*time.Second) diff --git a/comp/core/autodiscovery/providers/zookeeper.go b/comp/core/autodiscovery/providers/zookeeper.go index e9da1ed8c515e..7815bcd9011aa 100644 --- a/comp/core/autodiscovery/providers/zookeeper.go +++ b/comp/core/autodiscovery/providers/zookeeper.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -41,9 +41,9 @@ type ZookeeperConfigProvider struct { } // NewZookeeperConfigProvider returns a new Client connected to a Zookeeper backend. 
-func NewZookeeperConfigProvider(providerConfig *config.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { +func NewZookeeperConfigProvider(providerConfig *pkgconfigsetup.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { if providerConfig == nil { - providerConfig = &config.ConfigurationProviders{} + providerConfig = &pkgconfigsetup.ConfigurationProviders{} } urls := strings.Split(providerConfig.TemplateURL, ",") diff --git a/comp/core/autodiscovery/providers/zookeeper_nop.go b/comp/core/autodiscovery/providers/zookeeper_nop.go index 91306ec420ad4..30c86050595a4 100644 --- a/comp/core/autodiscovery/providers/zookeeper_nop.go +++ b/comp/core/autodiscovery/providers/zookeeper_nop.go @@ -9,8 +9,8 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewZookeeperConfigProvider returns a new Client connected to a Zookeeper backend. -var NewZookeeperConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewZookeeperConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/config/go.mod b/comp/core/config/go.mod index 2832dc70d1edd..619752c738b85 100644 --- a/comp/core/config/go.mod +++ b/comp/core/config/go.mod @@ -14,7 +14,9 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../pkg/config/model/ + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/telemetry => ../../../pkg/telemetry github.com/DataDog/datadog-agent/pkg/util/executable => ../../../pkg/util/executable github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../pkg/util/filesystem @@ -52,6 +54,8 @@ require ( github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect @@ -88,7 +92,7 @@ require ( github.com/prometheus/procfs v0.11.1 // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect @@ -105,12 +109,12 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp 
v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/comp/core/config/go.sum b/comp/core/config/go.sum index 35de5be41fa6e..b9db3b9f594bd 100644 --- a/comp/core/config/go.sum +++ b/comp/core/config/go.sum @@ -186,8 +186,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -261,15 +262,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -305,8 +306,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -322,8 +323,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/comp/core/gui/guiimpl/checks.go b/comp/core/gui/guiimpl/checks.go index c72a4e8f39c64..f57acb9cbac1d 100644 --- a/comp/core/gui/guiimpl/checks.go +++ b/comp/core/gui/guiimpl/checks.go @@ -28,19 +28,19 @@ import ( pkgcollector "github.com/DataDog/datadog-agent/pkg/collector" checkstats "github.com/DataDog/datadog-agent/pkg/collector/check/stats" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) var ( configPaths = []string{ - config.Datadog().GetString("confd_path"), // Custom checks - filepath.Join(path.GetDistPath(), "conf.d"), // Default check configs + pkgconfigsetup.Datadog().GetString("confd_path"), // Custom checks + filepath.Join(path.GetDistPath(), "conf.d"), // Default check configs } checkPaths = []string{ - filepath.Join(path.GetDistPath(), "checks.d"), // Custom checks - config.Datadog().GetString("additional_checksd"), // Custom checks + filepath.Join(path.GetDistPath(), "checks.d"), // Custom checks + pkgconfigsetup.Datadog().GetString("additional_checksd"), // Custom checks path.PyChecksPath, // Integrations-core checks } ) @@ -247,7 +247,7 @@ func setCheckConfigFile(w http.ResponseWriter, r *http.Request) { var checkConfFolderPath, defaultCheckConfFolderPath string if checkFolder != "" { - checkConfFolderPath, err = 
securejoin.SecureJoin(config.Datadog().GetString("confd_path"), checkFolder) + checkConfFolderPath, err = securejoin.SecureJoin(pkgconfigsetup.Datadog().GetString("confd_path"), checkFolder) if err != nil { http.Error(w, "invalid checkFolder path", http.StatusBadRequest) log.Errorf("Error: Unable to join provided \"confd_path\" setting path with checkFolder: %s", err.Error()) @@ -260,7 +260,7 @@ func setCheckConfigFile(w http.ResponseWriter, r *http.Request) { return } } else { - checkConfFolderPath = config.Datadog().GetString("confd_path") + checkConfFolderPath = pkgconfigsetup.Datadog().GetString("confd_path") defaultCheckConfFolderPath = filepath.Join(path.GetDistPath(), "conf.d") } @@ -352,7 +352,7 @@ func getWheelsChecks() ([]string, error) { } for _, integration := range integrations { - if _, ok := config.StandardJMXIntegrations[integration]; !ok { + if _, ok := pkgconfigsetup.StandardJMXIntegrations[integration]; !ok { pyChecks = append(pyChecks, integration) } } @@ -391,7 +391,7 @@ func listChecks(w http.ResponseWriter, _ *http.Request) { integrations = append(integrations, goIntegrations...) // Get jmx-checks - for integration := range config.StandardJMXIntegrations { + for integration := range pkgconfigsetup.StandardJMXIntegrations { integrations = append(integrations, integration) } diff --git a/comp/core/hostname/hostnameimpl/service_test.go b/comp/core/hostname/hostnameimpl/service_test.go index 1b2177f30b080..29d6626c292dd 100644 --- a/comp/core/hostname/hostnameimpl/service_test.go +++ b/comp/core/hostname/hostnameimpl/service_test.go @@ -10,7 +10,7 @@ import ( "testing" "github.com/DataDog/datadog-agent/comp/core/hostname" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/stretchr/testify/assert" @@ -21,9 +21,9 @@ func TestGet(t *testing.T) { t.Cleanup(func() { // erase cache cache.Cache.Delete(cache.BuildAgentKey("hostname")) - config.Datadog().SetWithoutSource("hostname", "") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "") }) - config.Datadog().SetWithoutSource("hostname", "test-hostname") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "test-hostname") s := fxutil.Test[hostname.Component](t, Module()) name, err := s.Get(context.Background()) require.NoError(t, err) @@ -34,9 +34,9 @@ func TestGetWithProvider(t *testing.T) { t.Cleanup(func() { // erase cache) cache.Cache.Delete(cache.BuildAgentKey("hostname")) - config.Datadog().SetWithoutSource("hostname", "") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "") }) - config.Datadog().SetWithoutSource("hostname", "test-hostname2") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "test-hostname2") s := fxutil.Test[hostname.Component](t, Module()) data, err := s.GetWithProvider(context.Background()) require.NoError(t, err) diff --git a/comp/core/hostname/remotehostnameimpl/hostname.go b/comp/core/hostname/remotehostnameimpl/hostname.go index 7732f9a4fbe84..6bb3d3273a810 100644 --- a/comp/core/hostname/remotehostnameimpl/hostname.go +++ b/comp/core/hostname/remotehostnameimpl/hostname.go @@ -12,7 +12,7 @@ import ( "github.com/avast/retry-go/v4" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/fxutil" 
"github.com/DataDog/datadog-agent/pkg/util/grpc" @@ -94,12 +94,12 @@ func getHostnameWithContext(ctx context.Context) (string, error) { ctx, cancel := context.WithTimeout(ctx, 1*time.Second) defer cancel() - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } - client, err := grpc.GetDDAgentClient(ctx, ipcAddress, config.GetIPCPort()) + client, err := grpc.GetDDAgentClient(ctx, ipcAddress, pkgconfigsetup.GetIPCPort()) if err != nil { return err } diff --git a/comp/core/log/impl-trace/go.mod b/comp/core/log/impl-trace/go.mod index 85668bb4ca4a0..c05eed2b273e9 100644 --- a/comp/core/log/impl-trace/go.mod +++ b/comp/core/log/impl-trace/go.mod @@ -16,7 +16,9 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/obfuscate => ../../../../pkg/obfuscate github.com/DataDog/datadog-agent/pkg/proto => ../../../../pkg/proto github.com/DataDog/datadog-agent/pkg/remoteconfig/state => ../../../../pkg/remoteconfig/state @@ -61,7 +63,9 @@ require ( github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect @@ -90,7 +94,7 @@ require ( github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/shirou/gopsutil/v3 v3.24.4 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect @@ -102,12 +106,12 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/core/log/impl-trace/go.sum b/comp/core/log/impl-trace/go.sum index 741210f9d5bf8..6bf5424fe1b01 100644 --- a/comp/core/log/impl-trace/go.sum +++ 
b/comp/core/log/impl-trace/go.sum @@ -181,8 +181,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -255,15 +256,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -299,8 +300,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 
h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -316,8 +317,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/comp/core/log/impl/go.mod b/comp/core/log/impl/go.mod index 427c90bd25c8f..1575492c8f147 100644 --- a/comp/core/log/impl/go.mod +++ b/comp/core/log/impl/go.mod @@ -2,59 +2,37 @@ module github.com/DataDog/datadog-agent/comp/core/log/impl go 1.22.0 -replace github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber - -replace github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer - -replace github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../../comp/core/flare/types - -replace github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional - -replace github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../../pkg/util/system/socket - -replace github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../../pkg/util/filesystem - -replace github.com/DataDog/datadog-agent/comp/core/secrets => ../../../../comp/core/secrets - -replace github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../../pkg/collector/check/defaults - -replace github.com/DataDog/datadog-agent/pkg/util/system => ../../../../pkg/util/system - -replace github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../pkg/util/hostname/validate - -replace github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../../../comp/core/flare/builder - -replace github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../pkg/config/mock - -replace github.com/DataDog/datadog-agent/pkg/config/env => ../../../../pkg/config/env - -replace github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../pkg/util/winutil - -replace github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../pkg/config/setup - -replace github.com/DataDog/datadog-agent/comp/def => ../../../../comp/def - -replace github.com/DataDog/datadog-agent/comp/core/config => ../../../../comp/core/config - -replace github.com/DataDog/datadog-agent/comp/core/log/def => ../../../../comp/core/log/def - -replace github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log - -replace 
github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../../pkg/util/log/setup - -replace github.com/DataDog/datadog-agent/pkg/config/model => ../../../../pkg/config/model - -replace github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil - -replace github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../pkg/util/executable - -replace github.com/DataDog/datadog-agent/cmd/agent/common/path => ../../../../cmd/agent/common/path - -replace github.com/DataDog/datadog-agent/comp/api/api/def => ../../../../comp/api/api/def - -replace github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../../comp/core/telemetry - -replace github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../../pkg/util/testutil +replace ( + github.com/DataDog/datadog-agent/cmd/agent/common/path => ../../../../cmd/agent/common/path + github.com/DataDog/datadog-agent/comp/api/api/def => ../../../../comp/api/api/def + github.com/DataDog/datadog-agent/comp/core/config => ../../../../comp/core/config + github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../../../comp/core/flare/builder + github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../../comp/core/flare/types + github.com/DataDog/datadog-agent/comp/core/log/def => ../../../../comp/core/log/def + github.com/DataDog/datadog-agent/comp/core/secrets => ../../../../comp/core/secrets + github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../../comp/core/telemetry + github.com/DataDog/datadog-agent/comp/def => ../../../../comp/def + github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../../pkg/collector/check/defaults + github.com/DataDog/datadog-agent/pkg/config/env => ../../../../pkg/config/env + github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../pkg/config/mock + github.com/DataDog/datadog-agent/pkg/config/model => ../../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../../pkg/config/nodetreemodel + github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../../pkg/config/teeconfig + github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../pkg/util/executable + github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../../pkg/util/filesystem + github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil + github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../pkg/util/hostname/validate + github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log + github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../../pkg/util/log/setup + github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber + github.com/DataDog/datadog-agent/pkg/util/system => ../../../../pkg/util/system + github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../../pkg/util/system/socket + github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../../pkg/util/testutil + github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../pkg/util/winutil +) require ( github.com/DataDog/datadog-agent/comp/core/config v0.0.0-00010101000000-000000000000 @@ -73,7 +51,9 @@ require ( github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect 
github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect @@ -103,7 +83,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect @@ -116,12 +96,12 @@ require ( go.uber.org/fx v1.22.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/core/log/impl/go.sum b/comp/core/log/impl/go.sum index c0f06ba5f32fa..b8178bc54e8ad 100644 --- a/comp/core/log/impl/go.sum +++ b/comp/core/log/impl/go.sum @@ -182,8 +182,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -255,15 +256,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= 
+golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -299,8 +300,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -316,8 +317,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/comp/core/status/statusimpl/go.mod b/comp/core/status/statusimpl/go.mod index a144e2c7ab101..a93d869629572 100644 --- a/comp/core/status/statusimpl/go.mod +++ b/comp/core/status/statusimpl/go.mod @@ -18,7 +18,9 @@ 
replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/telemetry => ../../../../pkg/telemetry github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../pkg/util/executable github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../../pkg/util/filesystem @@ -62,6 +64,8 @@ require ( github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect @@ -109,8 +113,8 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/sys v0.25.0 // indirect google.golang.org/protobuf v1.34.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/comp/core/status/statusimpl/go.sum b/comp/core/status/statusimpl/go.sum index 7099150b7bf3c..e24cd787408c4 100644 --- a/comp/core/status/statusimpl/go.sum +++ b/comp/core/status/statusimpl/go.sum @@ -266,8 +266,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -305,8 +305,8 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= diff --git a/comp/core/sysprobeconfig/component.go b/comp/core/sysprobeconfig/component.go index c5484b2d9d856..5a87cfdf04115 100644 --- a/comp/core/sysprobeconfig/component.go +++ b/comp/core/sysprobeconfig/component.go @@ -16,21 +16,22 @@ package sysprobeconfig import ( + "go.uber.org/fx" + sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/optional" - "go.uber.org/fx" ) // team: ebpf-platform // Component is the component type. type Component interface { - config.ReaderWriter + model.ReaderWriter // Warnings returns config warnings collected during setup. - Warnings() *config.Warnings + Warnings() *model.Warnings // SysProbeObject returns the wrapper sysconfig SysProbeObject() *sysconfigtypes.Config diff --git a/comp/core/sysprobeconfig/sysprobeconfigimpl/config.go b/comp/core/sysprobeconfig/sysprobeconfigimpl/config.go index ba5d6bca73bb8..83582e117cda1 100644 --- a/comp/core/sysprobeconfig/sysprobeconfigimpl/config.go +++ b/comp/core/sysprobeconfig/sysprobeconfigimpl/config.go @@ -13,7 +13,8 @@ import ( sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -29,12 +30,12 @@ func Module() fxutil.Module { type cfg struct { // this component is currently implementing a thin wrapper around pkg/config, // and uses globals in that package. - config.Config + model.Config syscfg *sysconfigtypes.Config // warnings are the warnings generated during setup - warnings *config.Warnings + warnings *model.Warnings } // sysprobeconfigDependencies is an interface that mimics the fx-oriented dependencies struct (This is copied from the main agent configuration.) 
@@ -64,14 +65,14 @@ func newConfig(deps dependencies) (sysprobeconfig.Component, error) { return nil, err } - return &cfg{Config: config.SystemProbe(), syscfg: syscfg}, nil + return &cfg{Config: pkgconfigsetup.SystemProbe(), syscfg: syscfg}, nil } -func (c *cfg) Warnings() *config.Warnings { +func (c *cfg) Warnings() *model.Warnings { return c.warnings } -func (c *cfg) Object() config.Reader { +func (c *cfg) Object() model.Reader { return c } diff --git a/comp/core/tagger/component.go b/comp/core/tagger/component.go index 8e325161f84ac..7b54c278d42ec 100644 --- a/comp/core/tagger/component.go +++ b/comp/core/tagger/component.go @@ -43,8 +43,8 @@ type Component interface { Standard(entityID string) ([]string, error) List() types.TaggerListResponse GetEntity(entityID string) (*types.Entity, error) - Subscribe(cardinality types.TagCardinality) chan []types.EntityEvent - Unsubscribe(ch chan []types.EntityEvent) + // subscriptionID is used for logging and debugging purposes + Subscribe(subscriptionID string, filter *types.Filter) (types.Subscription, error) GetEntityHash(entityID string, cardinality types.TagCardinality) string AgentTags(cardinality types.TagCardinality) ([]string, error) GlobalTags(cardinality types.TagCardinality) ([]string, error) diff --git a/comp/core/tagger/noopimpl/tagger.go b/comp/core/tagger/noopimpl/tagger.go index 9563d90fd4000..b689dd980299c 100644 --- a/comp/core/tagger/noopimpl/tagger.go +++ b/comp/core/tagger/noopimpl/tagger.go @@ -15,6 +15,7 @@ package noopimpl import ( "context" + "fmt" "go.uber.org/fx" @@ -73,12 +74,10 @@ func (n *noopTagger) GetEntity(string) (*types.Entity, error) { return nil, nil } -func (n *noopTagger) Subscribe(types.TagCardinality) chan []types.EntityEvent { - return make(chan []types.EntityEvent) +func (n *noopTagger) Subscribe(string, *types.Filter) (types.Subscription, error) { + return nil, fmt.Errorf("not implemented") } -func (n *noopTagger) Unsubscribe(chan []types.EntityEvent) {} - func (n *noopTagger) GetEntityHash(string, types.TagCardinality) string { return "" } diff --git a/comp/core/tagger/params.go b/comp/core/tagger/params.go index 3e4482d6594ec..6b1bead6bf503 100644 --- a/comp/core/tagger/params.go +++ b/comp/core/tagger/params.go @@ -7,7 +7,7 @@ package tagger import ( "github.com/DataDog/datadog-agent/comp/core/config" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // AgentTypeForTagger represents agent types that tagger is used for @@ -29,7 +29,7 @@ type Params struct { // NewTaggerParamsForCoreAgent is a constructor function for creating core agent tagger params func NewTaggerParamsForCoreAgent(_ config.Component) Params { - if pkgconfig.IsCLCRunner() { + if pkgconfigsetup.IsCLCRunner(pkgconfigsetup.Datadog()) { return NewCLCRunnerRemoteTaggerParams() } return NewTaggerParams() diff --git a/comp/core/tagger/taggerimpl/collectors/ecs_common.go b/comp/core/tagger/taggerimpl/collectors/ecs_common.go index 161a92e09fa92..c4170ad398b15 100644 --- a/comp/core/tagger/taggerimpl/collectors/ecs_common.go +++ b/comp/core/tagger/taggerimpl/collectors/ecs_common.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/DataDog/datadog-agent/comp/core/tagger/taglist" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func addResourceTags(t *taglist.TagList, m map[string]string) { @@ -19,7 +19,7 @@ func addResourceTags(t *taglist.TagList, m map[string]string) { continue } - if 
config.Datadog().GetBool("ecs_resource_tags_replace_colon") { + if pkgconfigsetup.Datadog().GetBool("ecs_resource_tags_replace_colon") { k = strings.ReplaceAll(k, ":", "_") } diff --git a/comp/core/tagger/taggerimpl/collectors/ecs_common_test.go b/comp/core/tagger/taggerimpl/collectors/ecs_common_test.go index 1826cd343beaf..488302b108e06 100644 --- a/comp/core/tagger/taggerimpl/collectors/ecs_common_test.go +++ b/comp/core/tagger/taggerimpl/collectors/ecs_common_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/DataDog/datadog-agent/comp/core/tagger/taglist" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/stretchr/testify/assert" ) @@ -51,10 +51,10 @@ func TestAddResourceTags(t *testing.T) { expectedTags.AddLow("environment", "sandbox") expectedTags.AddLow("project", "ecs-test") expectedTags.AddLow("foo_bar_baz", "val") - config.Datadog().SetWithoutSource("ecs_resource_tags_replace_colon", true) + pkgconfigsetup.Datadog().SetWithoutSource("ecs_resource_tags_replace_colon", true) return expectedTags }, - resetFunc: func() { config.Datadog().SetWithoutSource("ecs_resource_tags_replace_colon", false) }, + resetFunc: func() { pkgconfigsetup.Datadog().SetWithoutSource("ecs_resource_tags_replace_colon", false) }, }, { name: "replace colon enabled, do not replace tag value", @@ -70,10 +70,10 @@ func TestAddResourceTags(t *testing.T) { expectedTags.AddLow("environment", "sandbox") expectedTags.AddLow("project", "ecs-test") expectedTags.AddLow("foo_bar_baz", "val1:val2") - config.Datadog().SetWithoutSource("ecs_resource_tags_replace_colon", true) + pkgconfigsetup.Datadog().SetWithoutSource("ecs_resource_tags_replace_colon", true) return expectedTags }, - resetFunc: func() { config.Datadog().SetWithoutSource("ecs_resource_tags_replace_colon", false) }, + resetFunc: func() { pkgconfigsetup.Datadog().SetWithoutSource("ecs_resource_tags_replace_colon", false) }, }, { name: "replace colon disabled", diff --git a/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go b/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go index d7753c7f495f2..dad04320b2ca5 100644 --- a/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go +++ b/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/tags" "github.com/DataDog/datadog-agent/comp/core/tagger/types" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -360,7 +360,7 @@ func (c *WorkloadMetaCollector) handleKubePod(ev workloadmeta.Event) []*types.Ta } kubeServiceDisabled := false - for _, disabledTag := range config.Datadog().GetStringSlice("kubernetes_ad_tags_disabled") { + for _, disabledTag := range pkgconfigsetup.Datadog().GetStringSlice("kubernetes_ad_tags_disabled") { if disabledTag == "kube_service" { kubeServiceDisabled = true break @@ -447,7 +447,7 @@ func (c *WorkloadMetaCollector) handleECSTask(ev workloadmeta.Event) []*types.Ta taskTags.AddOrchestrator(tags.TaskARN, task.ID) if task.ClusterName != "" { - if !config.Datadog().GetBool("disable_cluster_name_tag_key") { + if !pkgconfigsetup.Datadog().GetBool("disable_cluster_name_tag_key") { taskTags.AddLow(tags.ClusterName, task.ClusterName) } 
taskTags.AddLow(tags.EcsClusterName, task.ClusterName) @@ -462,6 +462,10 @@ func (c *WorkloadMetaCollector) handleECSTask(ev workloadmeta.Event) []*types.Ta addResourceTags(taskTags, task.Tags) } + if task.ServiceName != "" { + taskTags.AddLow(tags.EcsServiceName, strings.ToLower(task.ServiceName)) + } + tagInfos := make([]*types.TagInfo, 0, len(task.Containers)) for _, taskContainer := range task.Containers { container, err := c.store.GetContainer(taskContainer.ID) diff --git a/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go b/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go index 63c2a66adc4af..c2ab0bb1714d8 100644 --- a/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go +++ b/comp/core/tagger/taggerimpl/collectors/workloadmeta_test.go @@ -1426,6 +1426,7 @@ func TestHandleECSTask(t *testing.T) { Name: containerName, }, }, + ServiceName: "datadog-agent-service", }, expected: []*types.TagInfo{ { @@ -1444,6 +1445,7 @@ func TestHandleECSTask(t *testing.T) { "task_family:datadog-agent", "task_name:datadog-agent", "task_version:1", + "ecs_service:datadog-agent-service", }, StandardTags: []string{}, }, diff --git a/comp/core/tagger/taggerimpl/generic_store/composite_store.go b/comp/core/tagger/taggerimpl/generic_store/composite_store.go index 13215d6fea371..1428079ea286c 100644 --- a/comp/core/tagger/taggerimpl/generic_store/composite_store.go +++ b/comp/core/tagger/taggerimpl/generic_store/composite_store.go @@ -5,7 +5,9 @@ package genericstore -import "github.com/DataDog/datadog-agent/comp/core/tagger/types" +import ( + "github.com/DataDog/datadog-agent/comp/core/tagger/types" +) type compositeObjectStore[T any] struct { data map[types.EntityIDPrefix]map[string]T @@ -30,6 +32,16 @@ func (os *compositeObjectStore[T]) Get(entityID types.EntityID) (object T, found return } +// GetWithEntityIDStr implements ObjectStore#GetWithEntityIDStr +func (os *compositeObjectStore[T]) GetWithEntityIDStr(id string) (object T, found bool) { + entityID, err := types.NewEntityIDFromString(id) + if err != nil { + return + } + + return os.Get(entityID) +} + // Set implements ObjectStore#Set func (os *compositeObjectStore[T]) Set(entityID types.EntityID, object T) { prefix := entityID.GetPrefix() @@ -64,10 +76,11 @@ func (os *compositeObjectStore[T]) Size() int { } // ListObjects implements ObjectStore#ListObjects -func (os *compositeObjectStore[T]) ListObjects() []T { +func (os *compositeObjectStore[T]) ListObjects(filter *types.Filter) []T { objects := make([]T, 0, os.Size()) - for _, idToObjects := range os.data { + for prefix := range filter.GetPrefixes() { + idToObjects := os.data[prefix] for _, object := range idToObjects { objects = append(objects, object) } @@ -77,8 +90,9 @@ func (os *compositeObjectStore[T]) ListObjects() []T { } // ForEach implements ObjectStore#ForEach -func (os *compositeObjectStore[T]) ForEach(apply types.ApplyFunc[T]) { - for prefix, idToObjects := range os.data { +func (os *compositeObjectStore[T]) ForEach(filter *types.Filter, apply types.ApplyFunc[T]) { + for prefix := range filter.GetPrefixes() { + idToObjects := os.data[prefix] for id, object := range idToObjects { apply(types.NewEntityID(prefix, id), object) } diff --git a/comp/core/tagger/taggerimpl/generic_store/default_store.go b/comp/core/tagger/taggerimpl/generic_store/default_store.go index d0112b8c0c8eb..e51028138bb2a 100644 --- a/comp/core/tagger/taggerimpl/generic_store/default_store.go +++ b/comp/core/tagger/taggerimpl/generic_store/default_store.go @@ -19,6 +19,14 @@ func (os 
defaulObjectStore[T]) Get(entityID types.EntityID) (object T, found boo return obj, found } +// GetWithEntityIDStr implements ObjectStore#GetWithEntityIDStr +func (os defaulObjectStore[T]) GetWithEntityIDStr(id string) (object T, found bool) { + // This store is only meant to be used with IDs of type "defaultEntityID" + // that's why we can call "NewDefaultEntityIDFromStr" + obj, found := os[types.NewDefaultEntityIDFromStr(id)] + return obj, found +} + // Set implements ObjectStore#Set func (os defaulObjectStore[T]) Set(entityID types.EntityID, object T) { os[entityID] = object @@ -35,19 +43,35 @@ func (os defaulObjectStore[T]) Size() int { } // ListObjects implements ObjectStore#ListObjects -func (os defaulObjectStore[T]) ListObjects() []T { +func (os defaulObjectStore[T]) ListObjects(filter *types.Filter) []T { objects := make([]T, 0) - for _, object := range os { - objects = append(objects, object) + if filter == nil { + for _, object := range os { + objects = append(objects, object) + } + } else { + for entityID, object := range os { + if filter.MatchesPrefix(entityID.GetPrefix()) { + objects = append(objects, object) + } + } } return objects } // ForEach implements ObjectStore#ForEach -func (os defaulObjectStore[T]) ForEach(apply types.ApplyFunc[T]) { - for id, object := range os { - apply(id, object) +func (os defaulObjectStore[T]) ForEach(filter *types.Filter, apply types.ApplyFunc[T]) { + if filter == nil { + for id, object := range os { + apply(id, object) + } + } else { + for id, object := range os { + if filter.MatchesPrefix(id.GetPrefix()) { + apply(id, object) + } + } } } diff --git a/comp/core/tagger/taggerimpl/generic_store/store_bench_test.go b/comp/core/tagger/taggerimpl/generic_store/store_bench_test.go index 13715c69de459..0a07b142ee1b3 100644 --- a/comp/core/tagger/taggerimpl/generic_store/store_bench_test.go +++ b/comp/core/tagger/taggerimpl/generic_store/store_bench_test.go @@ -14,7 +14,7 @@ import ( configmock "github.com/DataDog/datadog-agent/pkg/config/mock" ) -const samples int = 1000000 +const samples int = 10000000 var weightedPrefixes = map[string]int{ "container_image_metadata": 60, @@ -24,7 +24,7 @@ var weightedPrefixes = map[string]int{ "deployment": 15, "kubernetes_metadata": 30, "kubernetes_pod_uid": 30, - "process": 30, + "process": 60, } // getWeightedPrefix selects a prefix based on the provided weights. 
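Reviewer note (illustrative sketch, not part of the patch): the generic_store hunks in this change make ListObjects and ForEach take a *types.Filter built with types.NewFilterBuilder, and add GetWithEntityIDStr for string-keyed lookups. The test-style sketch below shows the intended call pattern; the prefix values and the test name are placeholders.

package genericstore

import (
	"testing"

	"github.com/DataDog/datadog-agent/comp/core/tagger/types"
	configmock "github.com/DataDog/datadog-agent/pkg/config/mock"
)

func TestFilterScopedIteration(t *testing.T) {
	cfg := configmock.New(t)
	store := NewObjectStore[string](cfg)

	store.Set(types.NewEntityID("container_id", "c1"), "container entry")
	store.Set(types.NewEntityID("kubernetes_pod_uid", "p1"), "pod entry")

	// Only prefixes included in the filter are listed or visited.
	filter := types.NewFilterBuilder().
		Include(types.EntityIDPrefix("container_id")).
		Build(types.HighCardinality)

	listed := store.ListObjects(filter) // expected: only the "container entry"
	_ = listed

	store.ForEach(filter, func(id types.EntityID, obj string) {
		// invoked only for entities under the "container_id" prefix
		_, _ = id, obj
	})

	// String-keyed lookup added by this change.
	if obj, found := store.GetWithEntityIDStr("container_id://c1"); found {
		_ = obj
	}
}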
@@ -55,6 +55,19 @@ func initStore(store types.ObjectStore[int]) { } } +func initFilter() *types.Filter { + fb := types.NewFilterBuilder() + + numberOfPrefixes := rand.Intn(len(weightedPrefixes)) + + for range numberOfPrefixes { + prefix := getNextPrefix() + fb.Include(prefix) + } + + return fb.Build(types.HighCardinality) +} + // Mock ApplyFunc for testing purposes func mockApplyFunc[T any](_ types.EntityID, _ T) {} @@ -166,7 +179,10 @@ func BenchmarkDefaultObjectStore_ForEach(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - store.ForEach(mockApplyFunc[int]) + b.StopTimer() + filter := initFilter() + b.StartTimer() + store.ForEach(filter, mockApplyFunc[int]) } } @@ -178,11 +194,14 @@ func BenchmarkCompositeObjectStore_ForEach(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - store.ForEach(mockApplyFunc[int]) + b.StopTimer() + filter := initFilter() + b.StartTimer() + store.ForEach(filter, mockApplyFunc[int]) } } -func BenchmarkDefaultObjectStore_ListAll(b *testing.B) { +func BenchmarkDefaultObjectStore_ListObjects(b *testing.B) { cfg := configmock.New(b) cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", false) store := NewObjectStore[int](cfg) @@ -191,11 +210,14 @@ func BenchmarkDefaultObjectStore_ListAll(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - _ = store.ListObjects() + b.StopTimer() + filter := initFilter() + b.StartTimer() + _ = store.ListObjects(filter) } } -func BenchmarkCompositeObjectStore_ListAll(b *testing.B) { +func BenchmarkCompositeObjectStore_ListObjects(b *testing.B) { cfg := configmock.New(b) cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", true) store := NewObjectStore[int](cfg) @@ -204,6 +226,9 @@ func BenchmarkCompositeObjectStore_ListAll(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - _ = store.ListObjects() + b.StopTimer() + filter := initFilter() + b.StartTimer() + _ = store.ListObjects(filter) } } diff --git a/comp/core/tagger/taggerimpl/generic_store/store_test.go b/comp/core/tagger/taggerimpl/generic_store/store_test.go index 0a72a87fecb2a..df79751f74218 100644 --- a/comp/core/tagger/taggerimpl/generic_store/store_test.go +++ b/comp/core/tagger/taggerimpl/generic_store/store_test.go @@ -75,6 +75,44 @@ func TestObjectStore_GetSet(t *testing.T) { test(t, true) } +func TestObjectStore_GetWithEntityIDStr(t *testing.T) { + test := func(t *testing.T, isComposite bool) { + cfg := configmock.New(t) + cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", isComposite) + + store := NewObjectStore[any](cfg) + + id := types.NewEntityID("prefix", "id") + idStr := id.String() + // getting a non-existent item + obj, found := store.GetWithEntityIDStr(idStr) + assert.Nil(t, obj) + assert.Falsef(t, found, "item should not be found in store") + + // set item + store.Set(id, struct{}{}) + + // getting item + obj, found = store.GetWithEntityIDStr(idStr) + assert.NotNil(t, obj) + assert.Truef(t, found, "item should be found in store") + + // unsetting item + store.Unset(id) + + // getting a non-existent item + obj, found = store.GetWithEntityIDStr(idStr) + assert.Nil(t, obj) + assert.Falsef(t, found, "item should not be found in store") + } + + // default store + test(t, false) + + // composite store + test(t, true) +} + func TestObjectStore_Size(t *testing.T) { test := func(t *testing.T, isComposite bool) { @@ -114,20 +152,26 @@ func TestObjectStore_ListObjects(t *testing.T) { cfg.SetWithoutSource("tagger.tagstore_use_composite_entity_id", isComposite) store := NewObjectStore[any](cfg) + // build 
some filter + fb := types.NewFilterBuilder() + fb.Include(types.EntityIDPrefix("prefix1"), types.EntityIDPrefix("prefix2")) + filter := fb.Build(types.HighCardinality) + // list should return empty - list := store.ListObjects() + list := store.ListObjects(filter) assert.Equalf(t, len(list), 0, "ListObjects should return an empty list") // add some items ids := []string{"prefix1://id1", "prefix2://id2", "prefix3://id3", "prefix4://id4"} for _, id := range ids { entityID, _ := types.NewEntityIDFromString(id) - store.Set(entityID, struct{}{}) + store.Set(entityID, id) } // list should return empty - list = store.ListObjects() - assert.Equalf(t, len(list), len(ids), "ListObjects should return a list of size %d", len(ids)) + list = store.ListObjects(filter) + expectedListing := []any{"prefix1://id1", "prefix2://id2"} + assert.ElementsMatch(t, expectedListing, list) } // default store @@ -152,10 +196,16 @@ func TestObjectStore_ForEach(t *testing.T) { } accumulator := []string{} - store.ForEach(func(id types.EntityID, _ any) { accumulator = append(accumulator, id.String()) }) + + // build some filter + fb := types.NewFilterBuilder() + fb.Include(types.EntityIDPrefix("prefix1"), types.EntityIDPrefix("prefix2")) + filter := fb.Build(types.HighCardinality) + + store.ForEach(filter, func(id types.EntityID, _ any) { accumulator = append(accumulator, id.String()) }) // list should return empty - assert.ElementsMatch(t, accumulator, ids) + assert.ElementsMatch(t, accumulator, []string{"prefix1://id1", "prefix2://id2"}) } // default store diff --git a/comp/core/tagger/taggerimpl/local/fake_tagger.go b/comp/core/tagger/taggerimpl/local/fake_tagger.go index 7882d8ee26389..32c4ae31cb5b0 100644 --- a/comp/core/tagger/taggerimpl/local/fake_tagger.go +++ b/comp/core/tagger/taggerimpl/local/fake_tagger.go @@ -150,13 +150,8 @@ func (f *FakeTagger) List() types.TaggerListResponse { } // Subscribe fake implementation -func (f *FakeTagger) Subscribe(cardinality types.TagCardinality) chan []types.EntityEvent { - return f.store.Subscribe(cardinality) -} - -// Unsubscribe fake implementation -func (f *FakeTagger) Unsubscribe(ch chan []types.EntityEvent) { - f.store.Unsubscribe(ch) +func (f *FakeTagger) Subscribe(subscriptionID string, filter *types.Filter) (types.Subscription, error) { + return f.store.Subscribe(subscriptionID, filter) } // Fake internals diff --git a/comp/core/tagger/taggerimpl/local/tagger.go b/comp/core/tagger/taggerimpl/local/tagger.go index 5a0d1dbf31afd..9654e1bf2becb 100644 --- a/comp/core/tagger/taggerimpl/local/tagger.go +++ b/comp/core/tagger/taggerimpl/local/tagger.go @@ -75,13 +75,13 @@ func (t *Tagger) Stop() error { } // getTags returns a read only list of tags for a given entity. 
-func (t *Tagger) getTags(entityID types.EntityID, cardinality types.TagCardinality) (tagset.HashedTags, error) { - if entityID.GetID() == "" { +func (t *Tagger) getTags(entityID string, cardinality types.TagCardinality) (tagset.HashedTags, error) { + if entityID == "" { t.telemetryStore.QueriesByCardinality(cardinality).EmptyEntityID.Inc() return tagset.HashedTags{}, fmt.Errorf("empty entity ID") } - cachedTags := t.tagStore.LookupHashed(entityID, cardinality) + cachedTags := t.tagStore.LookupHashedWithEntityStr(entityID, cardinality) t.telemetryStore.QueriesByCardinality(cardinality).Success.Inc() return cachedTags, nil @@ -89,16 +89,14 @@ func (t *Tagger) getTags(entityID types.EntityID, cardinality types.TagCardinali // AccumulateTagsFor appends tags for a given entity from the tagger to the TagsAccumulator func (t *Tagger) AccumulateTagsFor(entityID string, cardinality types.TagCardinality, tb tagset.TagsAccumulator) error { - id, _ := types.NewEntityIDFromString(entityID) - tags, err := t.getTags(id, cardinality) + tags, err := t.getTags(entityID, cardinality) tb.AppendHashed(tags) return err } // Tag returns a copy of the tags for a given entity func (t *Tagger) Tag(entityID string, cardinality types.TagCardinality) ([]string, error) { - id, _ := types.NewEntityIDFromString(entityID) - tags, err := t.getTags(id, cardinality) + tags, err := t.getTags(entityID, cardinality) if err != nil { return nil, err } @@ -130,13 +128,8 @@ func (t *Tagger) List() types.TaggerListResponse { // Subscribe returns a channel that receives a slice of events whenever an entity is // added, modified or deleted. It can send an initial burst of events only to the new // subscriber, without notifying all of the others. -func (t *Tagger) Subscribe(cardinality types.TagCardinality) chan []types.EntityEvent { - return t.tagStore.Subscribe(cardinality) -} - -// Unsubscribe ends a subscription to entity events and closes its channel. 
-func (t *Tagger) Unsubscribe(ch chan []types.EntityEvent) { - t.tagStore.Unsubscribe(ch) +func (t *Tagger) Subscribe(subscriptionID string, filter *types.Filter) (types.Subscription, error) { + return t.tagStore.Subscribe(subscriptionID, filter) } // ReplayTagger returns the replay tagger instance diff --git a/comp/core/tagger/taggerimpl/local/tagger_test.go b/comp/core/tagger/taggerimpl/local/tagger_test.go index f45e470f7701e..0d8f8c058fcc9 100644 --- a/comp/core/tagger/taggerimpl/local/tagger_test.go +++ b/comp/core/tagger/taggerimpl/local/tagger_test.go @@ -27,8 +27,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -func TestTagBuilder(t *testing.T) { - +func TestAccumulateTagsFor(t *testing.T) { entityID := types.NewEntityID("", "entity_name") store := fxutil.Test[workloadmetamock.Mock](t, fx.Options( @@ -65,3 +64,50 @@ func TestTagBuilder(t *testing.T) { assert.NoError(t, err) assert.ElementsMatch(t, []string{"high", "low1", "low2"}, tb.Get()) } + +func TestTag(t *testing.T) { + entityID := types.NewEntityID(types.ContainerID, "123") + entityIDStr := entityID.String() + + store := fxutil.Test[workloadmetamock.Mock](t, fx.Options( + fx.Supply(config.Params{}), + fx.Supply(log.Params{}), + fx.Provide(func() log.Component { return logmock.New(t) }), + config.MockModule(), + workloadmetafxmock.MockModule(workloadmeta.NewParams()), + )) + + tel := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) + telemetryStore := taggerTelemetry.NewStore(tel) + cfg := configmock.New(t) + tagger := NewTagger(cfg, store, telemetryStore) + + tagger.tagStore.ProcessTagInfo([]*types.TagInfo{ + { + EntityID: entityID, + Source: "stream", + LowCardTags: []string{"low1"}, + OrchestratorCardTags: []string{"orchestrator1"}, + HighCardTags: []string{"high1"}, + }, + { + EntityID: entityID, + Source: "pull", + LowCardTags: []string{"low2"}, + OrchestratorCardTags: []string{"orchestrator2"}, + HighCardTags: []string{"high2"}, + }, + }) + + lowCardTags, err := tagger.Tag(entityIDStr, types.LowCardinality) + assert.NoError(t, err) + assert.ElementsMatch(t, []string{"low1", "low2"}, lowCardTags) + + orchestratorCardTags, err := tagger.Tag(entityIDStr, types.OrchestratorCardinality) + assert.NoError(t, err) + assert.ElementsMatch(t, []string{"low1", "low2", "orchestrator1", "orchestrator2"}, orchestratorCardTags) + + highCardTags, err := tagger.Tag(entityIDStr, types.HighCardinality) + assert.NoError(t, err) + assert.ElementsMatch(t, []string{"low1", "low2", "orchestrator1", "orchestrator2", "high1", "high2"}, highCardTags) +} diff --git a/comp/core/tagger/taggerimpl/remote/tagger.go b/comp/core/tagger/taggerimpl/remote/tagger.go index e0a7d7ec01f17..c0af4b6059b17 100644 --- a/comp/core/tagger/taggerimpl/remote/tagger.go +++ b/comp/core/tagger/taggerimpl/remote/tagger.go @@ -263,13 +263,8 @@ func (t *Tagger) List() types.TaggerListResponse { // Subscribe returns a channel that receives a slice of events whenever an entity is // added, modified or deleted. It can send an initial burst of events only to the new // subscriber, without notifying all of the others. -func (t *Tagger) Subscribe(cardinality types.TagCardinality) chan []types.EntityEvent { - return t.store.subscribe(cardinality) -} - -// Unsubscribe ends a subscription to entity events and closes its channel. 
-func (t *Tagger) Unsubscribe(ch chan []types.EntityEvent) { - t.store.unsubscribe(ch) +func (t *Tagger) Subscribe(subscriptionID string, filter *types.Filter) (types.Subscription, error) { + return t.store.subscribe(subscriptionID, filter) } func (t *Tagger) run() { diff --git a/comp/core/tagger/taggerimpl/remote/tagstore.go b/comp/core/tagger/taggerimpl/remote/tagstore.go index 5f3d12de8eac4..26ed1906e358f 100644 --- a/comp/core/tagger/taggerimpl/remote/tagstore.go +++ b/comp/core/tagger/taggerimpl/remote/tagstore.go @@ -23,17 +23,17 @@ type tagStore struct { telemetry map[string]float64 cfg config.Component - subscriber *subscriber.Subscriber - telemetryStore *telemetry.Store + subscriptionManager subscriber.SubscriptionManager + telemetryStore *telemetry.Store } func newTagStore(cfg config.Component, telemetryStore *telemetry.Store) *tagStore { return &tagStore{ - store: genericstore.NewObjectStore[*types.Entity](cfg), - telemetry: make(map[string]float64), - cfg: cfg, - subscriber: subscriber.NewSubscriber(telemetryStore), - telemetryStore: telemetryStore, + store: genericstore.NewObjectStore[*types.Entity](cfg), + telemetry: make(map[string]float64), + cfg: cfg, + subscriptionManager: subscriber.NewSubscriptionManager(telemetryStore), + telemetryStore: telemetryStore, } } @@ -81,7 +81,7 @@ func (s *tagStore) getEntity(entityID types.EntityID) *types.Entity { func (s *tagStore) listEntities() []*types.Entity { s.mutex.RLock() defer s.mutex.RUnlock() - return s.store.ListObjects() + return s.store.ListObjects(types.NewMatchAllFilter()) } func (s *tagStore) collectTelemetry() { @@ -93,7 +93,7 @@ func (s *tagStore) collectTelemetry() { s.mutex.Lock() defer s.mutex.Unlock() - s.store.ForEach(func(_ types.EntityID, e *types.Entity) { s.telemetry[string(e.ID.GetPrefix())]++ }) + s.store.ForEach(nil, func(_ types.EntityID, e *types.Entity) { s.telemetry[string(e.ID.GetPrefix())]++ }) for prefix, storedEntities := range s.telemetry { s.telemetryStore.StoredEntities.Set(storedEntities, remoteSource, prefix) @@ -101,28 +101,24 @@ func (s *tagStore) collectTelemetry() { } } -func (s *tagStore) subscribe(cardinality types.TagCardinality) chan []types.EntityEvent { +func (s *tagStore) subscribe(subscriptionID string, filter *types.Filter) (types.Subscription, error) { s.mutex.RLock() defer s.mutex.RUnlock() events := make([]types.EntityEvent, 0, s.store.Size()) - s.store.ForEach(func(_ types.EntityID, e *types.Entity) { + s.store.ForEach(nil, func(_ types.EntityID, e *types.Entity) { events = append(events, types.EntityEvent{ EventType: types.EventTypeAdded, Entity: *e, }) }) - return s.subscriber.Subscribe(cardinality, events) -} - -func (s *tagStore) unsubscribe(ch chan []types.EntityEvent) { - s.subscriber.Unsubscribe(ch) + return s.subscriptionManager.Subscribe(subscriptionID, filter, events) } func (s *tagStore) notifySubscribers(events []types.EntityEvent) { - s.subscriber.Notify(events) + s.subscriptionManager.Notify(events) } // reset clears the local store, preparing it to be re-initialized from a fresh @@ -138,7 +134,7 @@ func (s *tagStore) reset() { events := make([]types.EntityEvent, 0, s.store.Size()) - s.store.ForEach(func(_ types.EntityID, e *types.Entity) { + s.store.ForEach(nil, func(_ types.EntityID, e *types.Entity) { events = append(events, types.EntityEvent{ EventType: types.EventTypeDeleted, Entity: types.Entity{ID: e.ID}, diff --git a/comp/core/tagger/taggerimpl/replay/tagger.go b/comp/core/tagger/taggerimpl/replay/tagger.go index 5c49d195ab0a6..33b736dfd44b2 100644 --- 
a/comp/core/tagger/taggerimpl/replay/tagger.go +++ b/comp/core/tagger/taggerimpl/replay/tagger.go @@ -8,6 +8,7 @@ package replay import ( "context" + "fmt" "time" "github.com/DataDog/datadog-agent/comp/core/config" @@ -102,14 +103,9 @@ func (t *Tagger) List() types.TaggerListResponse { } // Subscribe does nothing in the replay tagger this tagger does not respond to events. -func (t *Tagger) Subscribe(types.TagCardinality) chan []types.EntityEvent { - // NOP - return nil -} - -// Unsubscribe does nothing in the replay tagger this tagger does not respond to events. -func (t *Tagger) Unsubscribe(chan []types.EntityEvent) { +func (t *Tagger) Subscribe(_ string, _ *types.Filter) (types.Subscription, error) { // NOP + return nil, fmt.Errorf("not implemented") } // ReplayTagger returns the replay tagger instance diff --git a/comp/core/tagger/taggerimpl/server/server.go b/comp/core/tagger/taggerimpl/server/server.go index 946a194235579..c614878f5fa54 100644 --- a/comp/core/tagger/taggerimpl/server/server.go +++ b/comp/core/tagger/taggerimpl/server/server.go @@ -9,6 +9,7 @@ package server import ( "context" "fmt" + "sync/atomic" "time" "google.golang.org/grpc/codes" @@ -27,15 +28,31 @@ const ( streamKeepAliveInterval = 9 * time.Minute ) +// streamIDManager is used to generate unique ID's for incoming streaming requests +// This unique ID is used to subscribe to the tagger +// TODO: remove this struct when the protobuf of the stream request is updated to use filters. +type streamIDManager struct { + atomic.Int32 +} + +func (s *streamIDManager) getNextUniqueID() string { + id := fmt.Sprintf("stream-client-%d", s.Add(1)) + return id +} + +var sharedIDManager streamIDManager + // Server is a grpc server that streams tagger entities type Server struct { taggerComponent tagger.Component + manager *streamIDManager } // NewServer returns a new Server func NewServer(t tagger.Component) *Server { return &Server{ taggerComponent: t, + manager: &sharedIDManager, } } @@ -53,14 +70,21 @@ func (s *Server) TaggerStreamEntities(in *pb.StreamTagsRequest, out pb.AgentSecu // these filters will be introduced when we implement a container // metadata service that can receive them as is from the tagger. - eventCh := s.taggerComponent.Subscribe(cardinality) - defer s.taggerComponent.Unsubscribe(eventCh) + filter := types.NewFilterBuilder().Build(cardinality) + + subscriptionID := s.manager.getNextUniqueID() + subscription, err := s.taggerComponent.Subscribe(subscriptionID, filter) + if err != nil { + return err + } + + defer subscription.Unsubscribe() ticker := time.NewTicker(streamKeepAliveInterval) defer ticker.Stop() for { select { - case events, ok := <-eventCh: + case events, ok := <-subscription.EventsChan(): if !ok { log.Warnf("subscriber channel closed, client will reconnect") return fmt.Errorf("subscriber channel closed") diff --git a/comp/core/tagger/taggerimpl/subscriber/subscriber.go b/comp/core/tagger/taggerimpl/subscriber/subscriber.go index 6f023b7203dd1..ee275339ae855 100644 --- a/comp/core/tagger/taggerimpl/subscriber/subscriber.go +++ b/comp/core/tagger/taggerimpl/subscriber/subscriber.go @@ -8,113 +8,31 @@ package subscriber import ( - "sync" - - "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry" "github.com/DataDog/datadog-agent/comp/core/tagger/types" - "github.com/DataDog/datadog-agent/pkg/util/log" ) const bufferSize = 1000 -// Subscriber allows processes to subscribe to entity events generated from a -// tagger. 
+// Subscriber describes a component that subscribes to the tagger +// Subscriber implements types.Subscription type Subscriber struct { - sync.RWMutex - subscribers map[chan []types.EntityEvent]types.TagCardinality - telemetryStore *telemetry.Store -} - -// NewSubscriber returns a new subscriber. -func NewSubscriber(telemetryStore *telemetry.Store) *Subscriber { - return &Subscriber{ - subscribers: make(map[chan []types.EntityEvent]types.TagCardinality), - telemetryStore: telemetryStore, - } -} - -// Subscribe returns a channel that receives a slice of events whenever an -// entity is added, modified or deleted. It can send an initial burst of events -// only to the new subscriber, without notifying all of the others. -func (s *Subscriber) Subscribe(cardinality types.TagCardinality, events []types.EntityEvent) chan []types.EntityEvent { - // this is a `ch []EntityEvent` instead of a `ch EntityEvent` to - // improve throughput, as bursts of events are as likely to occur as - // isolated events, especially at startup or with collectors that - // periodically pull changes. - ch := make(chan []types.EntityEvent, bufferSize) - - s.Lock() - s.subscribers[ch] = cardinality - s.telemetryStore.Subscribers.Inc() - s.Unlock() - - if len(events) > 0 { - s.notify(ch, events, cardinality) - } - - return ch + filter types.Filter + id string + ch chan []types.EntityEvent + manager SubscriptionManager } -// Unsubscribe ends a subscription to entity events and closes its channel. -func (s *Subscriber) Unsubscribe(ch chan []types.EntityEvent) { - s.Lock() - defer s.Unlock() - - s.unsubscribe(ch) +// ID implements #types.Subscription.ID +func (s *Subscriber) ID() string { + return s.id } -// unsubscribe ends a subscription to entity events and closes its channel. It -// is not thread-safe, and callers should take care of synchronization. -func (s *Subscriber) unsubscribe(ch chan []types.EntityEvent) { - if _, ok := s.subscribers[ch]; ok { - s.telemetryStore.Subscribers.Dec() - delete(s.subscribers, ch) - close(ch) - } +// EventsChan implements #types.Subscription.EventsChan +func (s *Subscriber) EventsChan() chan []types.EntityEvent { + return s.ch } -// Notify sends a slice of EntityEvents to all registered subscribers at their -// chosen cardinality. -func (s *Subscriber) Notify(events []types.EntityEvent) { - if len(events) == 0 { - return - } - - s.Lock() - defer s.Unlock() - - for ch, cardinality := range s.subscribers { - if len(ch) >= bufferSize { - log.Info("channel full, canceling subscription") - s.unsubscribe(ch) - continue - } - - s.notify(ch, events, cardinality) - } -} - -// notify sends a slice of EntityEvents to a channel at a chosen cardinality. 
-func (s *Subscriber) notify(ch chan []types.EntityEvent, events []types.EntityEvent, cardinality types.TagCardinality) { - subscriberEvents := make([]types.EntityEvent, 0, len(events)) - - for _, event := range events { - var entity types.Entity - - if event.EventType == types.EventTypeDeleted { - entity = types.Entity{ID: event.Entity.ID} - } else { - entity = event.Entity.Copy(cardinality) - } - - subscriberEvents = append(subscriberEvents, types.EntityEvent{ - EventType: event.EventType, - Entity: entity, - }) - } - - s.telemetryStore.Sends.Inc() - s.telemetryStore.Events.Add(float64(len(events)), types.TagCardinalityToString(cardinality)) - - ch <- subscriberEvents +// Unsubscribe implements #types.Subscription.Unsubscribe +func (s *Subscriber) Unsubscribe() { + s.manager.Unsubscribe(s.ID()) } diff --git a/comp/core/tagger/taggerimpl/subscriber/subscriber_test.go b/comp/core/tagger/taggerimpl/subscriber/subscriber_test.go deleted file mode 100644 index f99a8d173c448..0000000000000 --- a/comp/core/tagger/taggerimpl/subscriber/subscriber_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package subscriber - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - taggerTelemetry "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry" - "github.com/DataDog/datadog-agent/comp/core/tagger/types" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" - "github.com/DataDog/datadog-agent/pkg/util/fxutil" -) - -var ( - entityID = types.NewEntityID("foo", "bar") -) - -func TestSubscriber(t *testing.T) { - events := map[string]types.EntityEvent{ - "added": { - EventType: types.EventTypeAdded, - Entity: types.Entity{ - ID: entityID, - }, - }, - "modified": { - EventType: types.EventTypeModified, - }, - "deleted": { - EventType: types.EventTypeDeleted, - }, - } - tel := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) - telemetryStore := taggerTelemetry.NewStore(tel) - s := NewSubscriber(telemetryStore) - - prevCh := s.Subscribe(types.LowCardinality, nil) - - s.Notify([]types.EntityEvent{ - events["added"], - events["modified"], - }) - - newCh := s.Subscribe(types.LowCardinality, []types.EntityEvent{ - events["added"], - }) - - s.Notify([]types.EntityEvent{ - events["modified"], - events["deleted"], - }) - - s.Unsubscribe(prevCh) - s.Unsubscribe(newCh) - - expectedPrevChEvents := []types.EntityEvent{ - events["added"], - events["modified"], - events["modified"], - events["deleted"], - } - expectedNewChEvents := []types.EntityEvent{ - events["added"], - events["modified"], - events["deleted"], - } - - prevChEvents := []types.EntityEvent{} - for e := range prevCh { - prevChEvents = append(prevChEvents, e...) - } - - newChEvents := []types.EntityEvent{} - for e := range newCh { - newChEvents = append(newChEvents, e...) 
- } - - assert.Equal(t, expectedPrevChEvents, prevChEvents) - assert.Equal(t, expectedNewChEvents, newChEvents) -} diff --git a/comp/core/tagger/taggerimpl/subscriber/subscription_manager.go b/comp/core/tagger/taggerimpl/subscriber/subscription_manager.go new file mode 100644 index 0000000000000..fce30fdc25a6e --- /dev/null +++ b/comp/core/tagger/taggerimpl/subscriber/subscription_manager.go @@ -0,0 +1,178 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package subscriber + +import ( + "fmt" + "sync" + + "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// SubscriptionManager allows processes to subscribe to entity events generated from a +// tagger. +type SubscriptionManager interface { + Subscribe(id string, filter *types.Filter, events []types.EntityEvent) (types.Subscription, error) + Unsubscribe(subscriptionID string) + Notify(events []types.EntityEvent) +} + +// subscriptionManager implements SubscriptionManager +type subscriptionManager struct { + sync.RWMutex + subscribers map[string]*Subscriber + prefixToSub map[types.EntityIDPrefix][]*Subscriber + telemetryStore *telemetry.Store +} + +// NewSubscriptionManager creates and returns a new subscription manager +func NewSubscriptionManager(telemetryStore *telemetry.Store) SubscriptionManager { + return &subscriptionManager{ + subscribers: make(map[string]*Subscriber), + prefixToSub: make(map[types.EntityIDPrefix][]*Subscriber), + telemetryStore: telemetryStore, + } +} + +// Subscribe returns a channel that receives a slice of events whenever an +// entity is added, modified or deleted. It can send an initial burst of events +// only to the new subscriber, without notifying all of the others. +func (sm *subscriptionManager) Subscribe(id string, filter *types.Filter, events []types.EntityEvent) (types.Subscription, error) { + // this is a `ch []EntityEvent` instead of a `ch EntityEvent` to + // improve throughput, as bursts of events are as likely to occur as + // isolated events, especially at startup or with collectors that + // periodically pull changes. + ch := make(chan []types.EntityEvent, bufferSize) + + if _, found := sm.subscribers[id]; found { + return nil, fmt.Errorf("duplicate subscription id error: subscription id %s is already in use", id) + } + + subscriber := &Subscriber{ + filter: *filter, + id: id, + ch: ch, + manager: sm, + } + + sm.Lock() + sm.subscribers[id] = subscriber + sm.telemetryStore.Subscribers.Inc() + + for prefix := range subscriber.filter.GetPrefixes() { + sm.prefixToSub[prefix] = append(sm.prefixToSub[prefix], subscriber) + } + + sm.Unlock() + + if len(events) > 0 { + sm.notify(ch, events, subscriber.filter.GetCardinality()) + } + + return subscriber, nil +} + +// unsubscribe ends a subscription to entity events and closes its channel. It +// is not thread-safe, and callers should take care of synchronization. 
+func (sm *subscriptionManager) Unsubscribe(subscriptionID string) { + sm.Lock() + defer sm.Unlock() + + sub, found := sm.subscribers[subscriptionID] + if !found { + log.Debugf("subscriber with %q is already unsubscribed", sub.id) + return + } + + for prefix := range sub.filter.GetPrefixes() { + currentPrefixSubscribers, found := sm.prefixToSub[prefix] + if !found { + // no subscribers for this prefix + continue + } + + newPrefixSubscribers := make([]*Subscriber, 0, len(currentPrefixSubscribers)) + + for _, prefixSubscriber := range currentPrefixSubscribers { + if prefixSubscriber.id != sub.id { + newPrefixSubscribers = append(newPrefixSubscribers, prefixSubscriber) + } + } + + sm.prefixToSub[prefix] = newPrefixSubscribers + } + + delete(sm.subscribers, sub.id) + close(sub.ch) + sm.telemetryStore.Subscribers.Dec() +} + +// Notify sends a slice of EntityEvents to all registered subscribers at their +// chosen cardinality. +func (sm *subscriptionManager) Notify(events []types.EntityEvent) { + if len(events) == 0 { + return + } + + sm.Lock() + defer sm.Unlock() + + subIDToEvents := map[string][]types.EntityEvent{} + + for _, event := range events { + entityID := event.Entity.ID + + if entityID == nil { + log.Warn("subscription manager received an entity with invalid nil id") + continue + } + + prefix := event.Entity.ID.GetPrefix() + if subscribers, found := sm.prefixToSub[prefix]; found { + for _, subscriber := range subscribers { + + if len(subscriber.ch) >= bufferSize { + log.Info("channel full, canceling subscription") + sm.Unsubscribe(subscriber.id) + continue + } + subIDToEvents[subscriber.id] = append(subIDToEvents[subscriber.id], event) + } + } + } + + for subID, events := range subIDToEvents { + subscriber := sm.subscribers[subID] + sm.notify(subscriber.ch, events, subscriber.filter.GetCardinality()) + } +} + +// notify sends a slice of EntityEvents to a channel at a chosen cardinality. +func (sm *subscriptionManager) notify(ch chan []types.EntityEvent, events []types.EntityEvent, cardinality types.TagCardinality) { + subscriberEvents := make([]types.EntityEvent, 0, len(events)) + + for _, event := range events { + var entity types.Entity + + if event.EventType == types.EventTypeDeleted { + entity = types.Entity{ID: event.Entity.ID} + } else { + entity = event.Entity.Copy(cardinality) + } + + subscriberEvents = append(subscriberEvents, types.EntityEvent{ + EventType: event.EventType, + Entity: entity, + }) + } + + sm.telemetryStore.Sends.Inc() + sm.telemetryStore.Events.Add(float64(len(events)), types.TagCardinalityToString(cardinality)) + + ch <- subscriberEvents +} diff --git a/comp/core/tagger/taggerimpl/subscriber/subscription_manager_test.go b/comp/core/tagger/taggerimpl/subscriber/subscription_manager_test.go new file mode 100644 index 0000000000000..b9684af30e2d4 --- /dev/null +++ b/comp/core/tagger/taggerimpl/subscriber/subscription_manager_test.go @@ -0,0 +1,219 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package subscriber + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + taggerTelemetry "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" + "github.com/DataDog/datadog-agent/comp/core/telemetry" + "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" +) + +func TestSubscriptionManager(t *testing.T) { + + entityID := types.NewEntityID("foo", "bar") + + events := map[string]types.EntityEvent{ + "added": { + EventType: types.EventTypeAdded, + Entity: types.Entity{ + ID: entityID, + HighCardinalityTags: []string{"t1:v1", "t2:v2", "t3:v3"}, + OrchestratorCardinalityTags: []string{"t1:v1", "t2:v2"}, + LowCardinalityTags: []string{"t1:v1"}, + StandardTags: []string{"s1:v1"}, + }, + }, + "modified": { + EventType: types.EventTypeModified, + Entity: types.Entity{ + ID: entityID, + HighCardinalityTags: []string{"t1:v1", "t2:v2", "t3:v3", "t4:v4"}, + OrchestratorCardinalityTags: []string{"t1:v1", "t2:v2"}, + LowCardinalityTags: []string{"t1:v1"}, + StandardTags: []string{"s1:v1"}, + }, + }, + "deleted": { + EventType: types.EventTypeDeleted, + Entity: types.Entity{ + ID: entityID, + }, + }, + "added-with-no-id": { + EventType: types.EventTypeAdded, + }, + "added-with-unmatched-prefix": { + EventType: types.EventTypeAdded, + Entity: types.Entity{ + ID: types.NewEntityID("gee", "goo"), + }, + }, + } + tel := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) + telemetryStore := taggerTelemetry.NewStore(tel) + sm := NewSubscriptionManager(telemetryStore) + + // Low Cardinality Subscriber + lowCardSubID := "low-card-sub" + lowCardSubscription, err := sm.Subscribe(lowCardSubID, types.NewFilterBuilder().Include(types.EntityIDPrefix("foo")).Build(types.LowCardinality), nil) + require.NoError(t, err) + + sm.Notify([]types.EntityEvent{ + events["added"], + events["modified"], + events["deleted"], + events["added-with-no-id"], + events["added-with-unmatched-prefix"], + }) + + lowCardSubscription.Unsubscribe() + + // Orchestrator Cardinality Subscriber + orchCardSubID := "orch-card-sub" + orchCardSubscription, err := sm.Subscribe(orchCardSubID, types.NewFilterBuilder().Include(types.EntityIDPrefix("foo")).Build(types.OrchestratorCardinality), nil) + require.NoError(t, err) + + sm.Notify([]types.EntityEvent{ + events["added"], + events["modified"], + events["deleted"], + events["added-with-no-id"], + events["added-with-unmatched-prefix"], + }) + + orchCardSubscription.Unsubscribe() + + // High Cardinality Subscriber + highCardSubID := "high-card-sub" + highCardSubscription, err := sm.Subscribe(highCardSubID, types.NewFilterBuilder().Include(types.EntityIDPrefix("foo")).Build(types.HighCardinality), []types.EntityEvent{ + events["added"], + }) + require.NoError(t, err) + + sm.Notify([]types.EntityEvent{ + events["modified"], + events["deleted"], + events["added-with-no-id"], + events["added-with-unmatched-prefix"], + }) + + highCardSubscription.Unsubscribe() + + // Verify low cardinality subscriber received events + assertReceivedEvents(t, lowCardSubscription.EventsChan(), []types.EntityEvent{ + { + EventType: types.EventTypeAdded, + Entity: types.Entity{ + ID: entityID, + LowCardinalityTags: []string{"t1:v1"}, + StandardTags: []string{"s1:v1"}, + }, + }, + { + EventType: types.EventTypeModified, + Entity: types.Entity{ + ID: entityID, + LowCardinalityTags: []string{"t1:v1"}, + StandardTags: 
[]string{"s1:v1"}, + }, + }, + { + EventType: types.EventTypeDeleted, + Entity: types.Entity{ + ID: entityID, + }, + }, + }) + + // Verify orchestrator cardinality subscriber received events + assertReceivedEvents(t, orchCardSubscription.EventsChan(), []types.EntityEvent{ + { + EventType: types.EventTypeAdded, + Entity: types.Entity{ + ID: entityID, + OrchestratorCardinalityTags: []string{"t1:v1", "t2:v2"}, + LowCardinalityTags: []string{"t1:v1"}, + StandardTags: []string{"s1:v1"}, + }, + }, + { + EventType: types.EventTypeModified, + Entity: types.Entity{ + ID: entityID, + OrchestratorCardinalityTags: []string{"t1:v1", "t2:v2"}, + LowCardinalityTags: []string{"t1:v1"}, + StandardTags: []string{"s1:v1"}, + }, + }, + { + EventType: types.EventTypeDeleted, + Entity: types.Entity{ + ID: entityID, + }, + }, + }) + + // Verify high cardinality subscriber received events + assertReceivedEvents(t, highCardSubscription.EventsChan(), []types.EntityEvent{ + { + EventType: types.EventTypeAdded, + Entity: types.Entity{ + ID: entityID, + HighCardinalityTags: []string{"t1:v1", "t2:v2", "t3:v3"}, + OrchestratorCardinalityTags: []string{"t1:v1", "t2:v2"}, + LowCardinalityTags: []string{"t1:v1"}, + StandardTags: []string{"s1:v1"}, + }, + }, + { + EventType: types.EventTypeModified, + Entity: types.Entity{ + ID: entityID, + HighCardinalityTags: []string{"t1:v1", "t2:v2", "t3:v3", "t4:v4"}, + OrchestratorCardinalityTags: []string{"t1:v1", "t2:v2"}, + LowCardinalityTags: []string{"t1:v1"}, + StandardTags: []string{"s1:v1"}, + }, + }, + { + EventType: types.EventTypeDeleted, + Entity: types.Entity{ + ID: entityID, + }, + }, + }) +} + +func assertReceivedEvents(t *testing.T, ch chan []types.EntityEvent, expectedEvents []types.EntityEvent) { + receivedEvents := []types.EntityEvent{} + + for e := range ch { + receivedEvents = append(receivedEvents, e...) + } + + assert.ElementsMatch(t, receivedEvents, expectedEvents) +} + +func TestSubscriptionManagerDuplicateSubscriberID(t *testing.T) { + tel := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) + telemetryStore := taggerTelemetry.NewStore(tel) + sm := NewSubscriptionManager(telemetryStore) + + // Low Cardinality Subscriber + lowCardSubID := "low-card-sub" + _, err := sm.Subscribe(lowCardSubID, types.NewFilterBuilder().Include(types.EntityIDPrefix("foo")).Build(types.LowCardinality), nil) + require.NoError(t, err) + + _, err = sm.Subscribe(lowCardSubID, types.NewFilterBuilder().Include(types.EntityIDPrefix("foo")).Build(types.LowCardinality), nil) + require.Error(t, err) +} diff --git a/comp/core/tagger/taggerimpl/tagger.go b/comp/core/tagger/taggerimpl/tagger.go index 87bd87086b736..c8c0b2020c846 100644 --- a/comp/core/tagger/taggerimpl/tagger.go +++ b/comp/core/tagger/taggerimpl/tagger.go @@ -425,30 +425,30 @@ func (t *TaggerClient) EnrichTags(tb tagset.TagsAccumulator, originInfo taggerty // | empty | not empty || container prefix + originFromMsg | // | none | not empty || container prefix + originFromMsg | if t.datadogConfig.dogstatsdOptOutEnabled && originInfo.Cardinality == "none" { - originInfo.FromUDS = packets.NoOrigin - originInfo.FromTag = "" - originInfo.FromMsg = "" + originInfo.ContainerIDFromSocket = packets.NoOrigin + originInfo.PodUID = "" + originInfo.ContainerID = "" return } // We use the UDS socket origin if no origin ID was specify in the tags // or 'dogstatsd_entity_id_precedence' is set to False (default false). 
- if originInfo.FromUDS != packets.NoOrigin && - (originInfo.FromTag == "" || !t.datadogConfig.dogstatsdEntityIDPrecedenceEnabled) { - if err := t.AccumulateTagsFor(originInfo.FromUDS, cardinality, tb); err != nil { + if originInfo.ContainerIDFromSocket != packets.NoOrigin && + (originInfo.PodUID == "" || !t.datadogConfig.dogstatsdEntityIDPrecedenceEnabled) { + if err := t.AccumulateTagsFor(originInfo.ContainerIDFromSocket, cardinality, tb); err != nil { t.log.Errorf("%s", err.Error()) } } // originFromClient can either be originInfo.FromTag or originInfo.FromMsg originFromClient := "" - if originInfo.FromTag != "" && originInfo.FromTag != "none" { + if originInfo.PodUID != "" && originInfo.PodUID != "none" { // Check if the value is not "none" in order to avoid calling the tagger for entity that doesn't exist. // Currently only supported for pods - originFromClient = types.NewEntityID(types.KubernetesPodUID, originInfo.FromTag).String() - } else if originInfo.FromTag == "" && len(originInfo.FromMsg) > 0 { + originFromClient = types.NewEntityID(types.KubernetesPodUID, originInfo.PodUID).String() + } else if originInfo.PodUID == "" && len(originInfo.ContainerID) > 0 { // originInfo.FromMsg is the container ID sent by the newer clients. - originFromClient = types.NewEntityID(types.ContainerID, originInfo.FromMsg).String() + originFromClient = types.NewEntityID(types.ContainerID, originInfo.ContainerID).String() } if originFromClient != "" { @@ -459,18 +459,18 @@ func (t *TaggerClient) EnrichTags(tb tagset.TagsAccumulator, originInfo taggerty } default: // Tag using Local Data - if originInfo.FromUDS != packets.NoOrigin { - if err := t.AccumulateTagsFor(originInfo.FromUDS, cardinality, tb); err != nil { + if originInfo.ContainerIDFromSocket != packets.NoOrigin { + if err := t.AccumulateTagsFor(originInfo.ContainerIDFromSocket, cardinality, tb); err != nil { t.log.Errorf("%s", err.Error()) } } - if err := t.AccumulateTagsFor(types.ContainerID.ToUID(originInfo.FromMsg), cardinality, tb); err != nil { - t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.FromMsg, err) + if err := t.AccumulateTagsFor(types.ContainerID.ToUID(originInfo.ContainerID), cardinality, tb); err != nil { + t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.ContainerID, err) } - if err := t.AccumulateTagsFor(types.KubernetesPodUID.ToUID(originInfo.FromTag), cardinality, tb); err != nil { - t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.FromTag, err) + if err := t.AccumulateTagsFor(types.KubernetesPodUID.ToUID(originInfo.PodUID), cardinality, tb); err != nil { + t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.PodUID, err) } // Tag using External Data. 
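Reviewer note (illustrative sketch, not part of the patch): the EnrichTags hunks above rename the OriginInfo fields: FromUDS becomes ContainerIDFromSocket, FromTag becomes PodUID, and FromMsg becomes ContainerID. The sketch below populates the renamed fields; the taggertypes import path and the literal values are assumptions, and the small local interface stands in for the tagger component.

package example

import (
	taggertypes "github.com/DataDog/datadog-agent/pkg/tagger/types" // assumed home of OriginInfo in this tree
	"github.com/DataDog/datadog-agent/pkg/tagset"
)

// tagEnricher captures just the EnrichTags method used here, so the sketch
// does not assume the full shape of the tagger Component interface.
type tagEnricher interface {
	EnrichTags(tagset.TagsAccumulator, taggertypes.OriginInfo)
}

// enrichWithOrigin accumulates tags for an origin described with the renamed fields.
func enrichWithOrigin(t tagEnricher) []string {
	tb := tagset.NewHashingTagsAccumulator()
	t.EnrichTags(tb, taggertypes.OriginInfo{
		ContainerIDFromSocket: "container_id://abc123", // previously FromUDS
		PodUID:                "pod-uid",               // previously FromTag
		ContainerID:           "abc123",                // previously FromMsg
		Cardinality:           "orchestrator",
	})
	return tb.Get()
}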
@@ -503,7 +503,7 @@ func (t *TaggerClient) EnrichTags(tb tagset.TagsAccumulator, originInfo taggerty // Accumulate tags for pod UID if parsedExternalData.podUID != "" { if err := t.AccumulateTagsFor(types.KubernetesPodUID.ToUID(parsedExternalData.podUID), cardinality, tb); err != nil { - t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.FromMsg, err) + t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.ContainerID, err) } } @@ -563,13 +563,8 @@ func taggerCardinality(cardinality string, } // Subscribe calls defaultTagger.Subscribe -func (t *TaggerClient) Subscribe(cardinality types.TagCardinality) chan []types.EntityEvent { - return t.defaultTagger.Subscribe(cardinality) -} - -// Unsubscribe calls defaultTagger.Unsubscribe -func (t *TaggerClient) Unsubscribe(ch chan []types.EntityEvent) { - t.defaultTagger.Unsubscribe(ch) +func (t *TaggerClient) Subscribe(subscriptionID string, filter *types.Filter) (types.Subscription, error) { + return t.defaultTagger.Subscribe(subscriptionID, filter) } type optionalTaggerDeps struct { diff --git a/comp/core/tagger/taggerimpl/tagger_test.go b/comp/core/tagger/taggerimpl/tagger_test.go index 600fdfd631685..258d23766c8d5 100644 --- a/comp/core/tagger/taggerimpl/tagger_test.go +++ b/comp/core/tagger/taggerimpl/tagger_test.go @@ -58,25 +58,25 @@ func TestEnrichTags(t *testing.T) { }, { name: "with local data (containerID) and low cardinality", - originInfo: taggertypes.OriginInfo{FromMsg: "container", Cardinality: "low"}, + originInfo: taggertypes.OriginInfo{ContainerID: "container", Cardinality: "low"}, expectedTags: []string{"container-low"}, cidProvider: &fakeCIDProvider{}, }, { name: "with local data (containerID) and high cardinality", - originInfo: taggertypes.OriginInfo{FromMsg: "container", Cardinality: "high"}, + originInfo: taggertypes.OriginInfo{ContainerID: "container", Cardinality: "high"}, expectedTags: []string{"container-low", "container-orch", "container-high"}, cidProvider: &fakeCIDProvider{}, }, { name: "with local data (podUID) and low cardinality", - originInfo: taggertypes.OriginInfo{FromTag: "pod", Cardinality: "low"}, + originInfo: taggertypes.OriginInfo{PodUID: "pod", Cardinality: "low"}, expectedTags: []string{"pod-low"}, cidProvider: &fakeCIDProvider{}, }, { name: "with local data (podUID) and high cardinality", - originInfo: taggertypes.OriginInfo{FromTag: "pod", Cardinality: "high"}, + originInfo: taggertypes.OriginInfo{PodUID: "pod", Cardinality: "high"}, expectedTags: []string{"pod-low", "pod-orch", "pod-high"}, cidProvider: &fakeCIDProvider{}, }, @@ -94,7 +94,7 @@ func TestEnrichTagsOrchestrator(t *testing.T) { defer fakeTagger.ResetTagger() fakeTagger.SetTags("foo://bar", "fooSource", []string{"lowTag"}, []string{"orchTag"}, nil, nil) tb := tagset.NewHashingTagsAccumulator() - fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{FromUDS: "foo://bar", Cardinality: "orchestrator"}) + fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{ContainerIDFromSocket: "foo://bar", Cardinality: "orchestrator"}) assert.Equal(t, []string{"lowTag", "orchTag"}, tb.Get()) } @@ -105,9 +105,9 @@ func TestEnrichTagsOptOut(t *testing.T) { cfg.SetWithoutSource("dogstatsd_origin_optout_enabled", true) fakeTagger.SetTags("foo://bar", "fooSource", []string{"lowTag"}, []string{"orchTag"}, nil, nil) tb := tagset.NewHashingTagsAccumulator() - fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{FromUDS: "foo://originID", FromTag: "pod-uid", FromMsg: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD}) + 
fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{ContainerIDFromSocket: "foo://originID", PodUID: "pod-uid", ContainerID: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD}) assert.Equal(t, []string{}, tb.Get()) - fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{FromUDS: "foo://originID", FromMsg: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD}) + fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{ContainerIDFromSocket: "foo://originID", ContainerID: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD}) assert.Equal(t, []string{}, tb.Get()) } diff --git a/comp/core/tagger/taggerimpl/tagstore/tagstore.go b/comp/core/tagger/taggerimpl/tagstore/tagstore.go index ddd07cb17252b..d4c4bbf49d766 100644 --- a/comp/core/tagger/taggerimpl/tagstore/tagstore.go +++ b/comp/core/tagger/taggerimpl/tagstore/tagstore.go @@ -41,7 +41,7 @@ type TagStore struct { store types.ObjectStore[EntityTags] telemetry map[string]map[string]float64 - subscriber *subscriber.Subscriber + subscriptionManager subscriber.SubscriptionManager clock clock.Clock @@ -56,12 +56,12 @@ func NewTagStore(cfg config.Component, telemetryStore *telemetry.Store) *TagStor func newTagStoreWithClock(cfg config.Component, clock clock.Clock, telemetryStore *telemetry.Store) *TagStore { return &TagStore{ - telemetry: make(map[string]map[string]float64), - store: genericstore.NewObjectStore[EntityTags](cfg), - subscriber: subscriber.NewSubscriber(telemetryStore), - clock: clock, - cfg: cfg, - telemetryStore: telemetryStore, + telemetry: make(map[string]map[string]float64), + store: genericstore.NewObjectStore[EntityTags](cfg), + subscriptionManager: subscriber.NewSubscriptionManager(telemetryStore), + clock: clock, + cfg: cfg, + telemetryStore: telemetryStore, } } @@ -163,7 +163,7 @@ func (s *TagStore) collectTelemetry() { s.Lock() defer s.Unlock() - s.store.ForEach(func(_ types.EntityID, et EntityTags) { + s.store.ForEach(nil, func(_ types.EntityID, et EntityTags) { prefix := string(et.getEntityID().GetPrefix()) for _, source := range et.sources() { @@ -186,29 +186,24 @@ func (s *TagStore) collectTelemetry() { // Subscribe returns a channel that receives a slice of events whenever an entity is // added, modified or deleted. It can send an initial burst of events only to the new // subscriber, without notifying all of the others. -func (s *TagStore) Subscribe(cardinality types.TagCardinality) chan []types.EntityEvent { +func (s *TagStore) Subscribe(subscriptionID string, filter *types.Filter) (types.Subscription, error) { s.RLock() defer s.RUnlock() events := make([]types.EntityEvent, 0, s.store.Size()) - s.store.ForEach(func(_ types.EntityID, et EntityTags) { + s.store.ForEach(filter, func(_ types.EntityID, et EntityTags) { events = append(events, types.EntityEvent{ EventType: types.EventTypeAdded, Entity: et.toEntity(), }) }) - return s.subscriber.Subscribe(cardinality, events) -} - -// Unsubscribe ends a subscription to entity events and closes its channel. -func (s *TagStore) Unsubscribe(ch chan []types.EntityEvent) { - s.subscriber.Unsubscribe(ch) + return s.subscriptionManager.Subscribe(subscriptionID, filter, events) } func (s *TagStore) notifySubscribers(events []types.EntityEvent) { - s.subscriber.Notify(events) + s.subscriptionManager.Notify(events) } // Prune deletes tags for entities that have been marked as deleted. 
This is to @@ -220,7 +215,7 @@ func (s *TagStore) Prune() { now := s.clock.Now() events := []types.EntityEvent{} - s.store.ForEach(func(eid types.EntityID, et EntityTags) { + s.store.ForEach(nil, func(eid types.EntityID, et EntityTags) { changed := et.deleteExpired(now) if !changed && !et.shouldRemove() { @@ -259,6 +254,22 @@ func (s *TagStore) LookupHashed(entityID types.EntityID, cardinality types.TagCa return storedTags.getHashedTags(cardinality) } +// LookupHashedWithEntityStr is the same as LookupHashed but takes a string as input. +// This function is needed only for performance reasons. It functions like +// LookupHashed, but accepts a string instead of an EntityID. This reduces the +// allocations that occur when an EntityID is passed as a parameter. +func (s *TagStore) LookupHashedWithEntityStr(entityID string, cardinality types.TagCardinality) tagset.HashedTags { + s.RLock() + defer s.RUnlock() + + storedTags, present := s.store.GetWithEntityIDStr(entityID) + + if !present { + return tagset.HashedTags{} + } + return storedTags.getHashedTags(cardinality) +} + // Lookup gets tags from the store and returns them concatenated in a string slice. func (s *TagStore) Lookup(entityID types.EntityID, cardinality types.TagCardinality) []string { return s.LookupHashed(entityID, cardinality).Get() @@ -283,7 +294,7 @@ func (s *TagStore) List() types.TaggerListResponse { s.RLock() defer s.RUnlock() - for _, et := range s.store.ListObjects() { + for _, et := range s.store.ListObjects(types.NewMatchAllFilter()) { r.Entities[et.getEntityID().String()] = types.TaggerListEntity{ Tags: et.tagsBySource(), } diff --git a/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go b/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go index 9e64b8f314e0b..86eed6a4f67c7 100644 --- a/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go +++ b/comp/core/tagger/taggerimpl/tagstore/tagstore_test.go @@ -6,7 +6,6 @@ package tagstore import ( - "fmt" "sync" "testing" "time" @@ -39,7 +38,6 @@ func (s *StoreTestSuite) SetupTest() { s.clock.Add(time.Since(time.Unix(0, 0))) mockConfig := configmock.New(s.T()) - fmt.Println("New Checkpoint: ", mockConfig) s.tagstore = newTagStoreWithClock(mockConfig, s.clock, telemetryStore) } @@ -98,6 +96,37 @@ func (s *StoreTestSuite) TestLookup() { assert.Len(s.T(), tagsOrch, 3) } +func (s *StoreTestSuite) TestLookupHashedWithEntityStr() { + entityID := types.NewEntityID(types.ContainerID, "test") + entityIDStr := entityID.String() + s.tagstore.ProcessTagInfo([]*types.TagInfo{ + { + Source: "source1", + EntityID: entityID, + LowCardTags: []string{"low1"}, + HighCardTags: []string{"high1"}, + }, + { + Source: "source2", + EntityID: entityID, + LowCardTags: []string{"low2"}, + }, + { + Source: "source3", + EntityID: entityID, + OrchestratorCardTags: []string{"orchestrator1"}, + }, + }) + + tagsLow := s.tagstore.LookupHashedWithEntityStr(entityIDStr, types.LowCardinality) + tagsOrch := s.tagstore.LookupHashedWithEntityStr(entityIDStr, types.OrchestratorCardinality) + tagsHigh := s.tagstore.LookupHashedWithEntityStr(entityIDStr, types.HighCardinality) + + assert.ElementsMatch(s.T(), tagsLow.Get(), []string{"low1", "low2"}) + assert.ElementsMatch(s.T(), tagsOrch.Get(), []string{"low1", "low2", "orchestrator1"}) + assert.ElementsMatch(s.T(), tagsHigh.Get(), []string{"low1", "low2", "orchestrator1", "high1"}) +} + func (s *StoreTestSuite) TestLookupStandard() { entityID := types.NewEntityID("", "test") @@ -469,8 +498,8 @@ func TestSubscribe(t *testing.T) { 
collectors.CollectorPriorities["source2"] = types.ClusterOrchestrator collectors.CollectorPriorities["source"] = types.NodeRuntime - entityID1 := types.NewEntityID("", "test1") - entityID2 := types.NewEntityID("", "test2") + entityID1 := types.NewEntityID(types.ContainerID, "test1") + entityID2 := types.NewEntityID(types.ContainerID, "test2") var expectedEvents = []entityEventExpectation{ {types.EventTypeAdded, entityID1, []string{"low"}, []string{}, []string{"high"}}, {types.EventTypeModified, entityID1, []string{"low"}, []string{"orch"}, []string{"high:1", "high:2"}}, @@ -482,7 +511,7 @@ func TestSubscribe(t *testing.T) { store.ProcessTagInfo([]*types.TagInfo{ { Source: "source", - EntityID: types.NewEntityID("", "test1"), + EntityID: entityID1, LowCardTags: []string{"low"}, HighCardTags: []string{"high"}, }, @@ -491,8 +520,13 @@ func TestSubscribe(t *testing.T) { highCardEvents := []types.EntityEvent{} lowCardEvents := []types.EntityEvent{} - highCardCh := store.Subscribe(types.HighCardinality) - lowCardCh := store.Subscribe(types.LowCardinality) + highCardSubID := "high-card-sub-id" + highCardSubscription, err := store.Subscribe(highCardSubID, types.NewFilterBuilder().Build(types.HighCardinality)) + require.NoError(t, err) + + lowCardSubID := "low-card-sub-id" + lowCardSubscription, err := store.Subscribe(lowCardSubID, types.NewFilterBuilder().Build(types.LowCardinality)) + require.NoError(t, err) store.ProcessTagInfo([]*types.TagInfo{ { @@ -532,12 +566,11 @@ func TestSubscribe(t *testing.T) { var wg sync.WaitGroup wg.Add(2) - go collectEvents(&wg, &highCardEvents, highCardCh) - go collectEvents(&wg, &lowCardEvents, lowCardCh) - - store.Unsubscribe(highCardCh) - store.Unsubscribe(lowCardCh) + go collectEvents(&wg, &highCardEvents, highCardSubscription.EventsChan()) + go collectEvents(&wg, &lowCardEvents, lowCardSubscription.EventsChan()) + highCardSubscription.Unsubscribe() + lowCardSubscription.Unsubscribe() wg.Wait() checkEvents(t, expectedEvents, highCardEvents, types.HighCardinality) diff --git a/comp/core/tagger/taglist/taglist.go b/comp/core/tagger/taglist/taglist.go index f8e36c9318755..9a2bf1196e083 100644 --- a/comp/core/tagger/taglist/taglist.go +++ b/comp/core/tagger/taglist/taglist.go @@ -10,7 +10,7 @@ import ( "fmt" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // TagList allows collector to incremental build a tag list @@ -30,7 +30,7 @@ func NewTagList() *TagList { orchestratorCardTags: make(map[string]bool), highCardTags: make(map[string]bool), standardTags: make(map[string]bool), - splitList: config.Datadog().GetStringMapString("tag_value_split_separator"), + splitList: pkgconfigsetup.Datadog().GetStringMapString("tag_value_split_separator"), } } diff --git a/comp/core/tagger/tags/tags.go b/comp/core/tagger/tags/tags.go index 2307447fbe789..fa24321003010 100644 --- a/comp/core/tagger/tags/tags.go +++ b/comp/core/tagger/tags/tags.go @@ -108,6 +108,8 @@ const ( EcsContainerName = "ecs_container_name" // EcsClusterName is the tag for the ECS cluster name EcsClusterName = "ecs_cluster_name" + // EcsServiceName is the tag for the ECS service name + EcsServiceName = "ecs_service" // Language is the tag for the process language Language = "language" diff --git a/comp/core/tagger/types/entity_id.go b/comp/core/tagger/types/entity_id.go index 62e44eb75f2f9..f6edb3339071e 100644 --- a/comp/core/tagger/types/entity_id.go +++ b/comp/core/tagger/types/entity_id.go @@ -31,24 +31,24 @@ type 
defaultEntityID string // GetID implements EntityID#GetID func (de defaultEntityID) GetID() string { - parts := strings.SplitN(string(de), separator, 2) + separatorIndex := strings.Index(string(de), separator) - if len(parts) != 2 { + if separatorIndex == -1 { return "" } - return parts[1] + return string(de[separatorIndex+len(separator):]) } // GetPrefix implements EntityID#GetPrefix func (de defaultEntityID) GetPrefix() EntityIDPrefix { - parts := strings.SplitN(string(de), separator, 2) + separatorIndex := strings.Index(string(de), separator) - if len(parts) != 2 { + if separatorIndex == -1 { return "" } - return EntityIDPrefix(parts[0]) + return EntityIDPrefix(de[:separatorIndex]) } // String implements EntityID#String @@ -56,7 +56,7 @@ func (de defaultEntityID) String() string { return string(de) } -func newDefaultEntityID(id string) EntityID { +func newDefaultEntityID(id string) defaultEntityID { return defaultEntityID(id) } @@ -111,6 +111,11 @@ func NewEntityIDFromString(plainStringID string) (EntityID, error) { return newDefaultEntityID(plainStringID), nil } +// NewDefaultEntityIDFromStr constructs a default EntityID from a plain string id +func NewDefaultEntityIDFromStr(plainStringID string) EntityID { + return newDefaultEntityID(plainStringID) +} + const ( // ContainerID is the prefix `container_id` ContainerID EntityIDPrefix = "container_id" @@ -129,3 +134,17 @@ const ( // Process is the prefix `process` Process EntityIDPrefix = "process" ) + +// AllPrefixesSet returns a set of all possible entity id prefixes that can be used in the tagger +func AllPrefixesSet() map[EntityIDPrefix]struct{} { + return map[EntityIDPrefix]struct{}{ + ContainerID: {}, + ContainerImageMetadata: {}, + ECSTask: {}, + Host: {}, + KubernetesDeployment: {}, + KubernetesMetadata: {}, + KubernetesPodUID: {}, + Process: {}, + } +} diff --git a/comp/core/tagger/types/entity_id_test.go b/comp/core/tagger/types/entity_id_test.go index a11c4e5184d02..82fbf3f09638b 100644 --- a/comp/core/tagger/types/entity_id_test.go +++ b/comp/core/tagger/types/entity_id_test.go @@ -71,3 +71,10 @@ func TestDefaultEntityID_GetPrefix(t *testing.T) { }) } } + +func TestNewDefaultEntityIDFromStr(t *testing.T) { + str := "container_id://1234" + entityID := NewDefaultEntityIDFromStr(str) + assert.Equal(t, ContainerID, entityID.GetPrefix()) + assert.Equal(t, "1234", entityID.GetID()) +} diff --git a/comp/core/tagger/types/filter_builder.go b/comp/core/tagger/types/filter_builder.go new file mode 100644 index 0000000000000..50eda7f467b6a --- /dev/null +++ b/comp/core/tagger/types/filter_builder.go @@ -0,0 +1,82 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package types + +import ( + "maps" +) + +// FilterBuilder builds a tagger subscriber filter based on include/exclude rules +type FilterBuilder struct { + prefixesToInclude map[EntityIDPrefix]struct{} + + prefixesToExclude map[EntityIDPrefix]struct{} +} + +// NewFilterBuilder returns a new empty filter builder +func NewFilterBuilder() *FilterBuilder { + return &FilterBuilder{ + prefixesToInclude: make(map[EntityIDPrefix]struct{}), + prefixesToExclude: make(map[EntityIDPrefix]struct{}), + } +} + +// Include includes the specified prefixes in the filter +func (fb *FilterBuilder) Include(prefixes ...EntityIDPrefix) *FilterBuilder { + if fb == nil { + panic("filter builder should not be nil") + } + + for _, prefix := range prefixes { + fb.prefixesToInclude[prefix] = struct{}{} + } + + return fb +} + +// Exclude excludes the specified prefixes from the filter +func (fb *FilterBuilder) Exclude(prefixes ...EntityIDPrefix) *FilterBuilder { + if fb == nil { + panic("filter builder should not be nil") + } + + for _, prefix := range prefixes { + fb.prefixesToExclude[prefix] = struct{}{} + } + + return fb +} + +// Build builds a new Filter object based on the calls to Include and Exclude. +// If the builder only excludes prefixes, the created filter will match any prefix except for the excluded ones. +// If the builder only includes prefixes, the created filter will match only the prefixes included in the builder. +// If the builder both includes and excludes prefixes, the created filter will match only prefixes that are included but not excluded in the builder. +// If the builder has neither included nor excluded prefixes, it will match all prefixes in `AllPrefixesSet` by default. +func (fb *FilterBuilder) Build(card TagCardinality) *Filter { + if fb == nil { + panic("filter builder should not be nil") + } + + if len(fb.prefixesToInclude)+len(fb.prefixesToExclude) == 0 { + return newFilter(AllPrefixesSet(), card) + } + + var prefixSet map[EntityIDPrefix]struct{} + + // initialise prefixSet with what should be included + if len(fb.prefixesToInclude) == 0 { + prefixSet = maps.Clone(AllPrefixesSet()) + } else { + prefixSet = maps.Clone(fb.prefixesToInclude) + } + + // exclude unwanted prefixes + for prefix := range fb.prefixesToExclude { + delete(prefixSet, prefix) + } + + return newFilter(prefixSet, card) +} diff --git a/comp/core/tagger/types/filter_builder_test.go b/comp/core/tagger/types/filter_builder_test.go new file mode 100644 index 0000000000000..73d5424103fae --- /dev/null +++ b/comp/core/tagger/types/filter_builder_test.go @@ -0,0 +1,99 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc.
+ +package types + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFilterBuilderOps(t *testing.T) { + tests := []struct { + name string + do func(*FilterBuilder) + buildCard TagCardinality + expectBuildFilter Filter + }{ + { + name: "do nothing", + do: func(_ *FilterBuilder) {}, + buildCard: HighCardinality, + expectBuildFilter: Filter{ + prefixes: AllPrefixesSet(), + cardinality: HighCardinality, + }, + }, + { + name: "only includes", + do: func(fb *FilterBuilder) { + fb.Include(KubernetesDeployment, ContainerID) + fb.Include(Host) + }, + buildCard: HighCardinality, + expectBuildFilter: Filter{ + prefixes: map[EntityIDPrefix]struct{}{ + KubernetesDeployment: {}, + ContainerID: {}, + Host: {}, + }, + cardinality: HighCardinality, + }, + }, + { + name: "only excludes", + do: func(fb *FilterBuilder) { + fb.Exclude(KubernetesDeployment, ContainerID) + fb.Exclude(Host) + }, + buildCard: HighCardinality, + expectBuildFilter: Filter{ + prefixes: map[EntityIDPrefix]struct{}{ + ContainerImageMetadata: {}, + ECSTask: {}, + KubernetesMetadata: {}, + KubernetesPodUID: {}, + Process: {}, + }, + cardinality: HighCardinality, + }, + }, + { + name: "both includes and excludes", + do: func(fb *FilterBuilder) { + fb.Include(ContainerImageMetadata) + fb.Exclude(KubernetesDeployment, ContainerID) + fb.Include(ContainerID) + fb.Exclude(Host, KubernetesMetadata) + fb.Include(Host, Process) + }, + buildCard: HighCardinality, + expectBuildFilter: Filter{ + prefixes: map[EntityIDPrefix]struct{}{ + ContainerImageMetadata: {}, + Process: {}, + }, + cardinality: HighCardinality, + }, + }, + } + + for _, test := range tests { + fb := NewFilterBuilder() + test.do(fb) + filter := fb.Build(test.buildCard) + assert.Truef(t, reflect.DeepEqual(*filter, test.expectBuildFilter), "expected %v, found %v", test.expectBuildFilter, filter) + } +} + +func TestNilFilterBuilderOps(t *testing.T) { + var fb *FilterBuilder + + assert.Panics(t, func() { fb.Include(ContainerID) }) + assert.Panics(t, func() { fb.Exclude(ContainerID) }) + assert.Panics(t, func() { fb.Build(HighCardinality) }) +} diff --git a/comp/core/tagger/types/filters.go b/comp/core/tagger/types/filters.go new file mode 100644 index 0000000000000..436b334e3bff2 --- /dev/null +++ b/comp/core/tagger/types/filters.go @@ -0,0 +1,60 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package types + +import ( + "maps" +) + +// NewMatchAllFilter returns a filter that matches any prefix +func NewMatchAllFilter() *Filter { + return nil +} + +// Filter represents a subscription filter for the tagger +type Filter struct { + prefixes map[EntityIDPrefix]struct{} + cardinality TagCardinality +} + +func newFilter(prefixes map[EntityIDPrefix]struct{}, cardinality TagCardinality) *Filter { + return &Filter{ + prefixes: maps.Clone(prefixes), + cardinality: cardinality, + } +} + +// GetPrefixes returns the prefix set of the filter +// If the filter is nil, a set containing all possible prefixes is returned +func (f *Filter) GetPrefixes() map[EntityIDPrefix]struct{} { + if f == nil { + return AllPrefixesSet() + } + + return maps.Clone(f.prefixes) +} + +// GetCardinality returns the filter cardinality +// If the filter is nil, High cardinality is returned +func (f *Filter) GetCardinality() TagCardinality { + if f == nil { + return HighCardinality + } + + return f.cardinality +} + +// MatchesPrefix returns whether or not the filter matches the prefix passed as argument +func (f *Filter) MatchesPrefix(prefix EntityIDPrefix) bool { + // A nil filter should match everything + if f == nil { + return true + } + + _, found := f.prefixes[prefix] + + return found +} diff --git a/comp/core/tagger/types/filters_test.go b/comp/core/tagger/types/filters_test.go new file mode 100644 index 0000000000000..97827d218a069 --- /dev/null +++ b/comp/core/tagger/types/filters_test.go @@ -0,0 +1,45 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package types + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFilterOps(t *testing.T) { + f := Filter{ + prefixes: map[EntityIDPrefix]struct{}{ + KubernetesDeployment: {}, + KubernetesPodUID: {}, + }, + cardinality: OrchestratorCardinality, + } + + // assert cardinality is correct + cardinality := f.GetCardinality() + assert.Equal(t, OrchestratorCardinality, cardinality) + + // assert GetPrefixes + expectedPrefixes := map[EntityIDPrefix]struct{}{ + KubernetesDeployment: {}, + KubernetesPodUID: {}, + } + assert.Truef(t, reflect.DeepEqual(expectedPrefixes, f.GetPrefixes()), "expected %v, found %v", expectedPrefixes, f.GetPrefixes()) +} + +func TestNilFilter(t *testing.T) { + var f *Filter + + assert.Truef(t, reflect.DeepEqual(f.GetPrefixes(), AllPrefixesSet()), "expected %v, found %v", AllPrefixesSet(), f.GetPrefixes()) + assert.Equalf(t, HighCardinality, f.GetCardinality(), "nil filter should have cardinality HIGH, found %v", f.GetCardinality()) + + for prefix := range AllPrefixesSet() { + assert.Truef(t, f.MatchesPrefix(prefix), "nil filter should match any prefix, didn't match %v", prefix) + } +} diff --git a/comp/core/tagger/types/go.mod b/comp/core/tagger/types/go.mod index 5abfecabc88c2..d13bc17b0bd15 100644 --- a/comp/core/tagger/types/go.mod +++ b/comp/core/tagger/types/go.mod @@ -13,7 +13,9 @@ replace ( github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../../pkg/collector/check/defaults github.com/DataDog/datadog-agent/pkg/config/env => ../../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/model => ../../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../../pkg/config/nodetreemodel 
github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../pkg/util/executable github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../../pkg/util/filesystem github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil @@ -40,7 +42,9 @@ require ( github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.2 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.56.2 // indirect github.com/DataDog/datadog-agent/pkg/config/model v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.56.2 // indirect github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.2 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.2 // indirect @@ -69,7 +73,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -77,12 +81,12 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/core/tagger/types/go.sum b/comp/core/tagger/types/go.sum index 77ba213060c82..765bdc23a7bf4 100644 --- a/comp/core/tagger/types/go.sum +++ b/comp/core/tagger/types/go.sum @@ -180,8 +180,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -251,15 +252,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod 
h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -295,8 +296,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -312,8 +313,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod 
h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/comp/core/tagger/types/types.go b/comp/core/tagger/types/types.go index e9e16653cf9aa..a66749bae5c28 100644 --- a/comp/core/tagger/types/types.go +++ b/comp/core/tagger/types/types.go @@ -22,16 +22,23 @@ type ApplyFunc[V any] func(EntityID, V) type ObjectStore[V any] interface { // Get returns an object with the specified entity ID if it exists in the store Get(EntityID) (V, bool) + // GetWithEntityIDStr returns an object with the specified entity ID if it + // exists in the store. + // This function is needed only for performance reasons. It functions like + // Get, but accepts a string instead of an EntityID, creating the EntityID + // internally. This reduces the allocations that occur when an EntityID is + // passed as a parameter. + GetWithEntityIDStr(string) (V, bool) // Set sets a given entityID to a given object in the store Set(EntityID, V) // Unset unsets a given entityID in the store Unset(EntityID) // Size returns the total number of objects in the store Size() int - // ListObjects returns a slice containing all objects of the store - ListObjects() []V - // ForEach applies a given function to each object in the store - ForEach(ApplyFunc[V]) + // ListObjects returns a slice containing objects of the store matching the filter + ListObjects(*Filter) []V + // ForEach applies a given function to each object in the store matching the filter + ForEach(*Filter, ApplyFunc[V]) } // TaggerListResponse holds the tagger list response @@ -197,3 +204,13 @@ func (e EntityIDPrefix) ToUID(id string) string { } return fmt.Sprintf("%s://%s", e, id) } + +// Subscription can be used by external subscribing components to interact with tagger events +type Subscription interface { + // EventsChan returns a channel on which the subscriber can receive tagger events + EventsChan() chan []EntityEvent + // ID returns the id of the subscription + ID() string + // Unsubscribe is used cancel subscription to the tagger + Unsubscribe() +} diff --git a/comp/core/telemetry/telemetryimpl/telemetry_mock.go b/comp/core/telemetry/telemetryimpl/telemetry_mock.go index 5d7a161c299cf..5d33b0fb4538e 100644 --- a/comp/core/telemetry/telemetryimpl/telemetry_mock.go +++ b/comp/core/telemetry/telemetryimpl/telemetry_mock.go @@ -44,9 +44,10 @@ func newMock(deps testDependencies) telemetry.Mock { telemetry := &telemetryImplMock{ telemetryImpl{ - mutex: &mutex, - registry: reg, - meterProvider: provider, + mutex: &mutex, + registry: reg, + meterProvider: provider, + defaultRegistry: prometheus.NewRegistry(), }, } diff --git a/comp/core/workloadmeta/collectors/internal/cloudfoundry/container/cf_container.go b/comp/core/workloadmeta/collectors/internal/cloudfoundry/container/cf_container.go index d19169605b4cc..172cfc3ea8c0b 100644 --- a/comp/core/workloadmeta/collectors/internal/cloudfoundry/container/cf_container.go +++ b/comp/core/workloadmeta/collectors/internal/cloudfoundry/container/cf_container.go @@ -15,8 +15,8 @@ import ( "go.uber.org/fx" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/cloudfoundry" "github.com/DataDog/datadog-agent/pkg/util/common" @@ -57,7 +57,7 @@ func (c *collector) Start(_ context.Context, store workloadmeta.Component) error } // Detect if we're on a PCF container - if !config.Datadog().GetBool("cloud_foundry_buildpack") { + if !pkgconfigsetup.Datadog().GetBool("cloud_foundry_buildpack") { return errors.NewDisabled(componentName, "Agent is not running on a CloudFoundry container") } diff --git a/comp/core/workloadmeta/collectors/internal/cloudfoundry/vm/cf_vm.go b/comp/core/workloadmeta/collectors/internal/cloudfoundry/vm/cf_vm.go index 7f8cb0263a984..6c274f1790449 100644 --- a/comp/core/workloadmeta/collectors/internal/cloudfoundry/vm/cf_vm.go +++ b/comp/core/workloadmeta/collectors/internal/cloudfoundry/vm/cf_vm.go @@ -15,8 +15,8 @@ import ( "go.uber.org/fx" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/cloudfoundry" "github.com/DataDog/datadog-agent/pkg/util/clusteragent" @@ -72,10 +72,10 @@ func (c *collector) Start(_ context.Context, store workloadmeta.Component) error return err } - c.nodeName = config.Datadog().GetString("bosh_id") + c.nodeName = pkgconfigsetup.Datadog().GetString("bosh_id") // Check for Cluster Agent availability (will be retried at each pull) - c.dcaEnabled = config.Datadog().GetBool("cluster_agent.enabled") + c.dcaEnabled = pkgconfigsetup.Datadog().GetBool("cluster_agent.enabled") c.dcaClient = c.getDCAClient() return nil diff --git a/comp/core/workloadmeta/collectors/internal/containerd/containerd.go b/comp/core/workloadmeta/collectors/internal/containerd/containerd.go index 08a592772291d..efc5524d149a2 100644 --- a/comp/core/workloadmeta/collectors/internal/containerd/containerd.go +++ b/comp/core/workloadmeta/collectors/internal/containerd/containerd.go @@ -20,8 +20,8 @@ import ( "go.uber.org/fx" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" agentErrors "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/sbom/scanner" "github.com/DataDog/datadog-agent/pkg/status/health" @@ -430,5 +430,5 @@ func (c *collector) cacheExitInfo(id string, exitCode *int64, exitTS time.Time) } func imageMetadataCollectionIsEnabled() bool { - return config.Datadog().GetBool("container_image.enabled") + return pkgconfigsetup.Datadog().GetBool("container_image.enabled") } diff --git a/comp/core/workloadmeta/collectors/internal/containerd/image_sbom_trivy.go b/comp/core/workloadmeta/collectors/internal/containerd/image_sbom_trivy.go index 91dbfca719f09..9867d2a6cb093 100644 --- a/comp/core/workloadmeta/collectors/internal/containerd/image_sbom_trivy.go +++ b/comp/core/workloadmeta/collectors/internal/containerd/image_sbom_trivy.go @@ -14,7 +14,7 @@ import ( "github.com/CycloneDX/cyclonedx-go" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/sbom" "github.com/DataDog/datadog-agent/pkg/sbom/collectors" "github.com/DataDog/datadog-agent/pkg/sbom/collectors/containerd" @@ -23,7 +23,7 @@ import ( ) func sbomCollectionIsEnabled() bool { - return imageMetadataCollectionIsEnabled() && config.Datadog().GetBool("sbom.container_image.enabled") + return imageMetadataCollectionIsEnabled() && pkgconfigsetup.Datadog().GetBool("sbom.container_image.enabled") } func (c *collector) startSBOMCollection(ctx context.Context) error { diff --git a/comp/core/workloadmeta/collectors/internal/containerd/network_linux.go b/comp/core/workloadmeta/collectors/internal/containerd/network_linux.go index f42b8cc219d39..0a56688795975 100644 --- a/comp/core/workloadmeta/collectors/internal/containerd/network_linux.go +++ b/comp/core/workloadmeta/collectors/internal/containerd/network_linux.go @@ -12,8 +12,8 @@ import ( "github.com/containerd/containerd" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" cutil "github.com/DataDog/datadog-agent/pkg/util/containerd" "github.com/DataDog/datadog-agent/pkg/util/system" ) @@ -48,7 +48,7 @@ func extractIP(namespace string, container containerd.Container, containerdClien // of them. for _, taskPid := range taskPids { IPs, err := system.ParseProcessIPs( - config.Datadog().GetString("container_proc_root"), + pkgconfigsetup.Datadog().GetString("container_proc_root"), int(taskPid.Pid), func(ip string) bool { return ip != "127.0.0.1" }, ) diff --git a/comp/core/workloadmeta/collectors/internal/docker/image_sbom_trivy.go b/comp/core/workloadmeta/collectors/internal/docker/image_sbom_trivy.go index fbeb54b4377c7..12f6d016421a3 100644 --- a/comp/core/workloadmeta/collectors/internal/docker/image_sbom_trivy.go +++ b/comp/core/workloadmeta/collectors/internal/docker/image_sbom_trivy.go @@ -15,7 +15,7 @@ import ( "github.com/CycloneDX/cyclonedx-go" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/sbom/collectors" "github.com/DataDog/datadog-agent/pkg/sbom/collectors/docker" "github.com/DataDog/datadog-agent/pkg/sbom/scanner" @@ -24,11 +24,11 @@ import ( ) func imageMetadataCollectionIsEnabled() bool { - return config.Datadog().GetBool("container_image.enabled") + return pkgconfigsetup.Datadog().GetBool("container_image.enabled") } func sbomCollectionIsEnabled() bool { - return imageMetadataCollectionIsEnabled() && config.Datadog().GetBool("sbom.container_image.enabled") + return imageMetadataCollectionIsEnabled() && pkgconfigsetup.Datadog().GetBool("sbom.container_image.enabled") } func (c *collector) startSBOMCollection(ctx context.Context) error { diff --git a/comp/core/workloadmeta/collectors/internal/kubemetadata/kubemetadata.go b/comp/core/workloadmeta/collectors/internal/kubemetadata/kubemetadata.go index 8dd66cacd8a79..f4a54be516779 100644 --- a/comp/core/workloadmeta/collectors/internal/kubemetadata/kubemetadata.go +++ b/comp/core/workloadmeta/collectors/internal/kubemetadata/kubemetadata.go @@ -19,8 +19,8 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" apiv1 "github.com/DataDog/datadog-agent/pkg/clusteragent/api/v1" - "github.com/DataDog/datadog-agent/pkg/config" 
"github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configutils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/clusteragent" @@ -83,7 +83,7 @@ func (c *collector) Start(_ context.Context, store workloadmeta.Component) error // If DCA is enabled and can't communicate with the DCA, let worloadmeta retry. var errDCA error - if config.Datadog().GetBool("cluster_agent.enabled") { + if pkgconfigsetup.Datadog().GetBool("cluster_agent.enabled") { c.dcaEnabled = false c.dcaClient, errDCA = clusteragent.GetClusterAgentClient() if errDCA != nil { @@ -95,7 +95,7 @@ func (c *collector) Start(_ context.Context, store workloadmeta.Component) error } // We return the permanent fail only if fallback is disabled - if retry.IsErrPermaFail(errDCA) && !config.Datadog().GetBool("cluster_agent.tagging_fallback") { + if retry.IsErrPermaFail(errDCA) && !pkgconfigsetup.Datadog().GetBool("cluster_agent.tagging_fallback") { return errDCA } @@ -106,7 +106,7 @@ func (c *collector) Start(_ context.Context, store workloadmeta.Component) error } // Fallback to local metamapper if DCA not enabled, or in permafail state with fallback enabled. - if !config.Datadog().GetBool("cluster_agent.enabled") || errDCA != nil { + if !pkgconfigsetup.Datadog().GetBool("cluster_agent.enabled") || errDCA != nil { // Using GetAPIClient as error returned follows the IsErrWillRetry/IsErrPermaFail // Workloadmeta will retry calling this method until permafail c.apiClient, err = apiserver.GetAPIClient() @@ -115,9 +115,9 @@ func (c *collector) Start(_ context.Context, store workloadmeta.Component) error } } - c.updateFreq = time.Duration(config.Datadog().GetInt("kubernetes_metadata_tag_update_freq")) * time.Second + c.updateFreq = time.Duration(pkgconfigsetup.Datadog().GetInt("kubernetes_metadata_tag_update_freq")) * time.Second - metadataAsTags := configutils.GetMetadataAsTags(config.Datadog()) + metadataAsTags := configutils.GetMetadataAsTags(pkgconfigsetup.Datadog()) c.collectNamespaceLabels = len(metadataAsTags.GetNamespaceLabelsAsTags()) > 0 c.collectNamespaceAnnotations = len(metadataAsTags.GetNamespaceAnnotationsAsTags()) > 0 diff --git a/comp/core/workloadmeta/collectors/internal/podman/podman.go b/comp/core/workloadmeta/collectors/internal/podman/podman.go index 9c171d5ed168c..d6b1bfa240539 100644 --- a/comp/core/workloadmeta/collectors/internal/podman/podman.go +++ b/comp/core/workloadmeta/collectors/internal/podman/podman.go @@ -18,8 +18,8 @@ import ( "go.uber.org/fx" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" dderrors "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -68,7 +68,7 @@ func (c *collector) Start(_ context.Context, store workloadmeta.Component) error } var dbPath string - dbPath = config.Datadog().GetString("podman_db_path") + dbPath = pkgconfigsetup.Datadog().GetString("podman_db_path") // We verify the user-provided path exists to prevent the collector entering a failing loop. 
if dbPath != "" && !dbIsAccessible(dbPath) { @@ -161,7 +161,11 @@ func convertToEvent(container *podman.Container) workloadmeta.CollectorEvent { log.Warnf("Could not get env vars for container %s", containerID) } - image, err := workloadmeta.NewContainerImage(container.Config.ContainerRootFSConfig.RootfsImageID, container.Config.RawImageName) + imageName := container.Config.RawImageName + if imageName == "" { + imageName = container.Config.RootfsImageName + } + image, err := workloadmeta.NewContainerImage(container.Config.ContainerRootFSConfig.RootfsImageID, imageName) if err != nil { log.Warnf("Could not get image for container %s", containerID) } diff --git a/comp/core/workloadmeta/collectors/internal/process/process_collector.go b/comp/core/workloadmeta/collectors/internal/process/process_collector.go index ee69b4c91e7fd..3d677913016bd 100644 --- a/comp/core/workloadmeta/collectors/internal/process/process_collector.go +++ b/comp/core/workloadmeta/collectors/internal/process/process_collector.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/errors" processwlm "github.com/DataDog/datadog-agent/pkg/process/metadata/workloadmeta" proccontainers "github.com/DataDog/datadog-agent/pkg/process/util/containers" @@ -50,7 +50,7 @@ type collector struct { // NewCollector returns a new local process collector provider and an error. // Currently, this is only used on Linux when language detection and run in core agent are enabled. func NewCollector() (workloadmeta.CollectorProvider, error) { - wlmExtractor := processwlm.GetSharedWorkloadMetaExtractor(config.SystemProbe()) + wlmExtractor := processwlm.GetSharedWorkloadMetaExtractor(pkgconfigsetup.SystemProbe()) processData := NewProcessData() processData.Register(wlmExtractor) @@ -81,7 +81,7 @@ func (c *collector) Start(ctx context.Context, store workloadmeta.Component) err // If process collection is disabled, the collector will gather the basic process and container data // necessary for language detection. 
- if !config.Datadog().GetBool("process_config.process_collection.enabled") { + if !pkgconfigsetup.Datadog().GetBool("process_config.process_collection.enabled") { collectionTicker := c.collectionClock.Ticker(10 * time.Second) if c.containerProvider == nil { c.containerProvider = proccontainers.GetSharedContainerProvider(store) diff --git a/comp/core/workloadmeta/collectors/internal/remote/generic.go b/comp/core/workloadmeta/collectors/internal/remote/generic.go index 51b122cf13a4f..3a697cc50ae0f 100644 --- a/comp/core/workloadmeta/collectors/internal/remote/generic.go +++ b/comp/core/workloadmeta/collectors/internal/remote/generic.go @@ -25,7 +25,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/telemetry" "github.com/DataDog/datadog-agent/pkg/api/security" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -148,7 +148,7 @@ func (c *GenericCollector) startWorkloadmetaStream(maxElapsed time.Duration) err default: } - token, err := security.FetchAuthToken(pkgconfig.Datadog()) + token, err := security.FetchAuthToken(pkgconfigsetup.Datadog()) if err != nil { err = fmt.Errorf("unable to fetch authentication token: %w", err) log.Warnf("unable to establish entity stream between agents, will possibly retry: %s", err) @@ -179,7 +179,7 @@ func (c *GenericCollector) startWorkloadmetaStream(maxElapsed time.Duration) err // Run will run the generic collector streaming loop func (c *GenericCollector) Run() { - recvWithoutTimeout := pkgconfig.Datadog().GetBool("workloadmeta.remote.recv_without_timeout") + recvWithoutTimeout := pkgconfigsetup.Datadog().GetBool("workloadmeta.remote.recv_without_timeout") for { select { diff --git a/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector.go b/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector.go index 40ee9c33cd0e1..fc36cfa790f25 100644 --- a/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector.go +++ b/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector.go @@ -20,7 +20,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/remote" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics" @@ -72,7 +73,7 @@ func (s *stream) Recv() (interface{}, error) { type streamHandler struct { port int - config.Reader + model.Reader } // workloadmetaEventFromProcessEventSet converts the given ProcessEventSet into a workloadmeta.Event @@ -119,7 +120,7 @@ func NewCollector() (workloadmeta.CollectorProvider, error) { Collector: &remote.GenericCollector{ CollectorID: collectorID, // TODO(components): make sure StreamHandler uses the config component not pkg/config - StreamHandler: &streamHandler{Reader: config.Datadog()}, + StreamHandler: &streamHandler{Reader: 
pkgconfigsetup.Datadog()}, Catalog: workloadmeta.NodeAgent, Insecure: true, // wlm extractor currently does not support TLS }, diff --git a/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector_test.go b/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector_test.go index a9e46426cc829..71e47ded6be31 100644 --- a/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector_test.go +++ b/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector_test.go @@ -30,7 +30,7 @@ import ( workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock" "github.com/DataDog/datadog-agent/pkg/api/security" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -65,11 +65,11 @@ func (s *mockServer) StreamEntities(_ *pbgo.ProcessStreamEntitiesRequest, out pb func TestCollection(t *testing.T) { // Create Auth Token for the client - if _, err := os.Stat(security.GetAuthTokenFilepath(pkgconfig.Datadog())); os.IsNotExist(err) { - security.CreateOrFetchToken(pkgconfig.Datadog()) + if _, err := os.Stat(security.GetAuthTokenFilepath(pkgconfigsetup.Datadog())); os.IsNotExist(err) { + security.CreateOrFetchToken(pkgconfigsetup.Datadog()) defer func() { // cleanup - os.Remove(security.GetAuthTokenFilepath(pkgconfig.Datadog())) + os.Remove(security.GetAuthTokenFilepath(pkgconfigsetup.Datadog())) }() } creationTime := time.Now().Unix() @@ -296,18 +296,25 @@ func TestCollection(t *testing.T) { require.NoError(t, err) // Number of events expected. Each response can hold multiple events, either Set or Unset - numberOfEvents := len(test.preEvents) + expectedNumberOfEvents := len(test.preEvents) for _, ev := range test.serverResponses { - numberOfEvents += len(ev.SetEvents) + len(ev.UnsetEvents) + expectedNumberOfEvents += len(ev.SetEvents) + len(ev.UnsetEvents) } // Keep listening to workloadmeta until enough events are received. It is possible that the // first bundle does not hold any events. Thus, it is required to look at the number of events // in the bundle. - for i := 0; i < numberOfEvents; { + // Also, when a problem occurs and a re-sync is triggered, we might + // receive duplicate events, so we need to keep a map of received + // events to account for duplicates. 
+ eventsReceived := make(map[workloadmeta.Event]struct{}) + for len(eventsReceived) < expectedNumberOfEvents { bundle := <-ch - close(bundle.Ch) - i += len(bundle.Events) + bundle.Acknowledge() + + for _, ev := range bundle.Events { + eventsReceived[ev] = struct{}{} + } } mockStore.Unsubscribe(ch) diff --git a/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta.go b/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta.go index 79d69f7bb33b2..ec4770a214260 100644 --- a/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta.go +++ b/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta.go @@ -18,7 +18,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/remote" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/proto" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" ) @@ -88,7 +89,7 @@ func (s *stream) Recv() (interface{}, error) { type streamHandler struct { port int filter *workloadmeta.Filter - config.Config + model.Config } // NewCollector returns a CollectorProvider to build a remote workloadmeta collector, and an error if any. @@ -102,7 +103,7 @@ func NewCollector(deps dependencies) (workloadmeta.CollectorProvider, error) { CollectorID: collectorID, StreamHandler: &streamHandler{ filter: deps.Params.Filter, - Config: config.Datadog(), + Config: pkgconfigsetup.Datadog(), }, Catalog: workloadmeta.Remote, }, diff --git a/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta_test.go b/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta_test.go index f4e4b47395b23..9045a35ac7c37 100644 --- a/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta_test.go +++ b/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta_test.go @@ -29,7 +29,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta/proto" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/server" "github.com/DataDog/datadog-agent/pkg/api/security" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -191,11 +191,11 @@ func TestHandleWorkloadmetaStreamResponse(t *testing.T) { func TestCollection(t *testing.T) { // Create Auth Token for the client - if _, err := os.Stat(security.GetAuthTokenFilepath(pkgconfig.Datadog())); os.IsNotExist(err) { - security.CreateOrFetchToken(pkgconfig.Datadog()) + if _, err := os.Stat(security.GetAuthTokenFilepath(pkgconfigsetup.Datadog())); os.IsNotExist(err) { + security.CreateOrFetchToken(pkgconfigsetup.Datadog()) defer func() { // cleanup - os.Remove(security.GetAuthTokenFilepath(pkgconfig.Datadog())) + os.Remove(security.GetAuthTokenFilepath(pkgconfigsetup.Datadog())) }() } diff --git a/comp/core/workloadmeta/collectors/util/process_util_linux.go b/comp/core/workloadmeta/collectors/util/process_util_linux.go index 840ff7f7088ac..62e4381e00cd7 100644 --- a/comp/core/workloadmeta/collectors/util/process_util_linux.go +++ 
b/comp/core/workloadmeta/collectors/util/process_util_linux.go @@ -8,7 +8,7 @@ package util import ( - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/flavor" ) @@ -20,8 +20,8 @@ func LocalProcessCollectorIsEnabled() bool { return false } - processChecksInCoreAgent := config.Datadog().GetBool("process_config.run_in_core_agent.enabled") - langDetectionEnabled := config.Datadog().GetBool("language_detection.enabled") + processChecksInCoreAgent := pkgconfigsetup.Datadog().GetBool("process_config.run_in_core_agent.enabled") + langDetectionEnabled := pkgconfigsetup.Datadog().GetBool("language_detection.enabled") return langDetectionEnabled && processChecksInCoreAgent } diff --git a/comp/dogstatsd/listeners/named_pipe_nowindows.go b/comp/dogstatsd/listeners/named_pipe_nowindows.go index 827468549b927..3e8ca506d9808 100644 --- a/comp/dogstatsd/listeners/named_pipe_nowindows.go +++ b/comp/dogstatsd/listeners/named_pipe_nowindows.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) // NamedPipeListener implements the StatsdListener interface for named pipe protocol. @@ -22,7 +22,7 @@ type NamedPipeListener struct{} // //nolint:revive // TODO(AML) Fix revive linter func NewNamedPipeListener(_ string, _ chan packets.Packets, - _ *packets.PoolManager[packets.Packet], _ config.Reader, _ replay.Component, _ *TelemetryStore, _ *packets.TelemetryStore, _ telemetry.Component) (*NamedPipeListener, error) { + _ *packets.PoolManager[packets.Packet], _ model.Reader, _ replay.Component, _ *TelemetryStore, _ *packets.TelemetryStore, _ telemetry.Component) (*NamedPipeListener, error) { return nil, errors.New("named pipe is only supported on Windows") } diff --git a/comp/dogstatsd/listeners/named_pipe_windows.go b/comp/dogstatsd/listeners/named_pipe_windows.go index e50c08629277c..a7061013b9adc 100644 --- a/comp/dogstatsd/listeners/named_pipe_windows.go +++ b/comp/dogstatsd/listeners/named_pipe_windows.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" winio "github.com/Microsoft/go-winio" @@ -42,7 +42,7 @@ type NamedPipeListener struct { // NewNamedPipeListener returns an named pipe Statsd listener func NewNamedPipeListener(pipeName string, packetOut chan packets.Packets, - sharedPacketPoolManager *packets.PoolManager[packets.Packet], cfg config.Reader, capture replay.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetrycomp telemetry.Component) (*NamedPipeListener, error) { + sharedPacketPoolManager *packets.PoolManager[packets.Packet], cfg model.Reader, capture replay.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetrycomp telemetry.Component) (*NamedPipeListener, error) { bufferSize := cfg.GetInt("dogstatsd_buffer_size") return newNamedPipeListener( diff --git a/comp/dogstatsd/listeners/ratelimit/mem_based_rate_limiter.go 
b/comp/dogstatsd/listeners/ratelimit/mem_based_rate_limiter.go index fb335ae81e258..e1ceef441c14c 100644 --- a/comp/dogstatsd/listeners/ratelimit/mem_based_rate_limiter.go +++ b/comp/dogstatsd/listeners/ratelimit/mem_based_rate_limiter.go @@ -13,7 +13,7 @@ import ( "time" "github.com/DataDog/datadog-agent/comp/core/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -46,7 +46,7 @@ var ballast []byte //nolint:unused var ballastOnce sync.Once // BuildMemBasedRateLimiter builds a new instance of *MemBasedRateLimiter -func BuildMemBasedRateLimiter(cfg config.Reader, telemetry telemetry.Component) (*MemBasedRateLimiter, error) { +func BuildMemBasedRateLimiter(cfg model.Reader, telemetry telemetry.Component) (*MemBasedRateLimiter, error) { var memoryUsage memoryUsage var err error if memoryUsage, err = newCgroupMemoryUsage(); err == nil { @@ -91,7 +91,7 @@ func BuildMemBasedRateLimiter(cfg config.Reader, telemetry telemetry.Component) ) } -func getConfigFloat(cfg config.Reader, subkey string) float64 { +func getConfigFloat(cfg model.Reader, subkey string) float64 { return cfg.GetFloat64("dogstatsd_mem_based_rate_limiter." + subkey) } diff --git a/comp/dogstatsd/listeners/udp.go b/comp/dogstatsd/listeners/udp.go index 4d528714b0dc0..f7b71c49e87ee 100644 --- a/comp/dogstatsd/listeners/udp.go +++ b/comp/dogstatsd/listeners/udp.go @@ -15,7 +15,8 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -50,7 +51,7 @@ type UDPListener struct { } // NewUDPListener returns an idle UDP Statsd listener -func NewUDPListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], cfg config.Reader, capture replay.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore) (*UDPListener, error) { +func NewUDPListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], cfg model.Reader, capture replay.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore) (*UDPListener, error) { var err error var url string @@ -63,7 +64,7 @@ func NewUDPListener(packetOut chan packets.Packets, sharedPacketPoolManager *pac // Listen to all network interfaces url = fmt.Sprintf(":%s", port) } else { - url = net.JoinHostPort(config.GetBindHostFromConfig(cfg), port) + url = net.JoinHostPort(pkgconfigsetup.GetBindHostFromConfig(cfg), port) } addr, err := net.ResolveUDPAddr("udp", url) diff --git a/comp/dogstatsd/listeners/uds_common.go b/comp/dogstatsd/listeners/uds_common.go index 4465ead476240..b8480c8ff2ef7 100644 --- a/comp/dogstatsd/listeners/uds_common.go +++ b/comp/dogstatsd/listeners/uds_common.go @@ -24,7 +24,7 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap" replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" ddsync "github.com/DataDog/datadog-agent/pkg/util/sync" @@ -56,7 +56,7 
@@ type UDSListener struct { trafficCapture replay.Component pidMap pidmap.Component OriginDetection bool - config config.Reader + config model.Reader wmeta optional.Option[workloadmeta.Component] @@ -79,7 +79,7 @@ type UDSListener struct { // CloseFunction is a function that closes a connection type CloseFunction func(unixConn *net.UnixConn) error -func setupUnixConn(conn *net.UnixConn, originDetection bool, config config.Reader) (bool, error) { +func setupUnixConn(conn *net.UnixConn, originDetection bool, config model.Reader) (bool, error) { if originDetection { err := enableUDSPassCred(conn) if err != nil { @@ -133,7 +133,7 @@ func NewUDSOobPoolManager() *packets.PoolManager[[]byte] { } // NewUDSListener returns an idle UDS Statsd listener -func NewUDSListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPacketPoolManager *packets.PoolManager[[]byte], cfg config.Reader, capture replay.Component, transport string, wmeta optional.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetry telemetry.Component) (*UDSListener, error) { +func NewUDSListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPacketPoolManager *packets.PoolManager[[]byte], cfg model.Reader, capture replay.Component, transport string, wmeta optional.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetry telemetry.Component) (*UDSListener, error) { originDetection := cfg.GetBool("dogstatsd_origin_detection") listener := &UDSListener{ diff --git a/comp/dogstatsd/listeners/uds_datagram.go b/comp/dogstatsd/listeners/uds_datagram.go index 5f6a59f1d86ce..654cbad487bed 100644 --- a/comp/dogstatsd/listeners/uds_datagram.go +++ b/comp/dogstatsd/listeners/uds_datagram.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap" replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -27,7 +27,7 @@ type UDSDatagramListener struct { } // NewUDSDatagramListener returns an idle UDS datagram Statsd listener -func NewUDSDatagramListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPoolManager *packets.PoolManager[[]byte], cfg config.Reader, capture replay.Component, wmeta optional.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetryComponent telemetry.Component) (*UDSDatagramListener, error) { +func NewUDSDatagramListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPoolManager *packets.PoolManager[[]byte], cfg model.Reader, capture replay.Component, wmeta optional.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetryComponent telemetry.Component) (*UDSDatagramListener, error) { socketPath := cfg.GetString("dogstatsd_socket") transport := "unixgram" diff --git a/comp/dogstatsd/listeners/uds_stream.go b/comp/dogstatsd/listeners/uds_stream.go index 
2c04ef3e9a0cf..494f78a93fbc6 100644 --- a/comp/dogstatsd/listeners/uds_stream.go +++ b/comp/dogstatsd/listeners/uds_stream.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap" replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -29,7 +29,7 @@ type UDSStreamListener struct { } // NewUDSStreamListener returns an idle UDS datagram Statsd listener -func NewUDSStreamListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPacketPoolManager *packets.PoolManager[[]byte], cfg config.Reader, capture replay.Component, wmeta optional.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetry telemetry.Component) (*UDSStreamListener, error) { +func NewUDSStreamListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPacketPoolManager *packets.PoolManager[[]byte], cfg model.Reader, capture replay.Component, wmeta optional.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetry telemetry.Component) (*UDSStreamListener, error) { socketPath := cfg.GetString("dogstatsd_stream_socket") transport := "unix" diff --git a/comp/dogstatsd/mapper/mapper_test.go b/comp/dogstatsd/mapper/mapper_test.go index ff05c83dcc281..c6139d4febf12 100644 --- a/comp/dogstatsd/mapper/mapper_test.go +++ b/comp/dogstatsd/mapper/mapper_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/require" configComponent "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/pkg/config/structure" ) func TestMappings(t *testing.T) { @@ -519,7 +520,7 @@ func getMapper(t *testing.T, configString string) (*MetricMapper, error) { cfg := configComponent.NewMockFromYAML(t, configString) - err := cfg.UnmarshalKey("dogstatsd_mapper_profiles", &profiles) + err := structure.UnmarshalKey(cfg, "dogstatsd_mapper_profiles", &profiles) if err != nil { return nil, err } diff --git a/comp/dogstatsd/packets/packet_manager_windows.go b/comp/dogstatsd/packets/packet_manager_windows.go index 695415f93ef60..32f79b661720f 100644 --- a/comp/dogstatsd/packets/packet_manager_windows.go +++ b/comp/dogstatsd/packets/packet_manager_windows.go @@ -9,7 +9,7 @@ package packets import ( "time" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) // PacketManager gathers everything required to create and assemble packets. @@ -20,7 +20,7 @@ type PacketManager struct { } // NewPacketManagerFromConfig creates a PacketManager from the relevant config settings. 
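Reviewer note: the dogstatsd listener and packet-manager hunks in this area all apply the same migration, replacing the monolithic pkg/config import with the narrower pkg/config/model.Reader interface for read-only access plus pkg/config/setup for process-global helpers such as GetBindHostFromConfig and Datadog(). The sketch below is a minimal illustration of that dependency shape, assuming model.Reader exposes the typed getters used in these hunks (GetString, GetBool, ...); the listener type, constructor name, and the dogstatsd_non_local_traffic key are used only for the example and are not part of the patch.

// Minimal sketch of a constructor that depends only on model.Reader, as the
// listeners above now do. exampleListener/newExampleListener are invented names.
package listeners

import (
	"fmt"
	"net"

	"github.com/DataDog/datadog-agent/pkg/config/model"
	pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
)

type exampleListener struct {
	addr string
}

// newExampleListener reads everything it needs through the read-only interface,
// so tests can pass a mock config instead of the process-global one.
func newExampleListener(cfg model.Reader) (*exampleListener, error) {
	port := cfg.GetString("dogstatsd_port")
	var url string
	if cfg.GetBool("dogstatsd_non_local_traffic") {
		// listen on all interfaces
		url = fmt.Sprintf(":%s", port)
	} else {
		// GetBindHostFromConfig now lives in pkg/config/setup, as in udp.go above
		url = net.JoinHostPort(pkgconfigsetup.GetBindHostFromConfig(cfg), port)
	}
	if _, err := net.ResolveUDPAddr("udp", url); err != nil {
		return nil, err
	}
	return &exampleListener{addr: url}, nil
}

Depending on model.Reader rather than the concrete config type keeps these constructors testable with mock configs, which is presumably the motivation for the repeated import swap.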
-func NewPacketManagerFromConfig(packetOut chan Packets, sharedPacketPoolManager *PoolManager[Packet], cfg config.Reader, telemetryStore *TelemetryStore) *PacketManager { +func NewPacketManagerFromConfig(packetOut chan Packets, sharedPacketPoolManager *PoolManager[Packet], cfg model.Reader, telemetryStore *TelemetryStore) *PacketManager { bufferSize := cfg.GetInt("dogstatsd_buffer_size") packetsBufferSize := cfg.GetInt("dogstatsd_packet_buffer_size") flushTimeout := cfg.GetDuration("dogstatsd_packet_buffer_flush_timeout") diff --git a/comp/dogstatsd/packets/pool.go b/comp/dogstatsd/packets/pool.go index 527c970834367..f0ee0b0a2a359 100644 --- a/comp/dogstatsd/packets/pool.go +++ b/comp/dogstatsd/packets/pool.go @@ -6,7 +6,7 @@ package packets import ( - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" ddsync "github.com/DataDog/datadog-agent/pkg/util/sync" ) @@ -42,7 +42,7 @@ func NewPool(bufferSize int, packetsTelemetry *TelemetryStore) *Pool { return packet }), // telemetry - tlmEnabled: usedByTestTelemetry || utils.IsTelemetryEnabled(config.Datadog()), + tlmEnabled: usedByTestTelemetry || utils.IsTelemetryEnabled(pkgconfigsetup.Datadog()), packetsTelemetry: packetsTelemetry, } } diff --git a/comp/dogstatsd/replay/impl/capture.go b/comp/dogstatsd/replay/impl/capture.go index e299487388ef0..c2800904f15ee 100644 --- a/comp/dogstatsd/replay/impl/capture.go +++ b/comp/dogstatsd/replay/impl/capture.go @@ -19,7 +19,7 @@ import ( compdef "github.com/DataDog/datadog-agent/comp/def" "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) //nolint:revive // TODO(AML) Fix revive linter @@ -31,7 +31,7 @@ type Requires struct { // trafficCapture allows capturing traffic from our listeners and writing it to file type trafficCapture struct { writer *TrafficCaptureWriter - config config.Reader + config model.Reader startUpError error sync.RWMutex diff --git a/comp/dogstatsd/server/batch.go b/comp/dogstatsd/server/batch.go index 06c7fd7a615df..d340be63c4121 100644 --- a/comp/dogstatsd/server/batch.go +++ b/comp/dogstatsd/server/batch.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/ckey" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" @@ -94,15 +94,15 @@ func (s *shardKeyGeneratorPerOrigin) Generate(sample metrics.MetricSample, shard // We fall back on the generic sharding if: // - the sample has a custom cardinality // - we don't have the origin - if sample.OriginInfo.Cardinality != "" || (sample.OriginInfo.FromUDS == "" && sample.OriginInfo.FromTag == "" && sample.OriginInfo.FromMsg == "") { + if sample.OriginInfo.Cardinality != "" || (sample.OriginInfo.ContainerIDFromSocket == "" && sample.OriginInfo.PodUID == "" && sample.OriginInfo.ContainerID == "") { return s.shardKeyGeneratorBase.Generate(sample, shards) } // Otherwise, we isolate the samples based on the origin. 
i, j := uint64(0), uint64(0) - i, j = murmur3.SeedStringSum128(i, j, sample.OriginInfo.FromTag) - i, j = murmur3.SeedStringSum128(i, j, sample.OriginInfo.FromMsg) - i, _ = murmur3.SeedStringSum128(i, j, sample.OriginInfo.FromUDS) + i, j = murmur3.SeedStringSum128(i, j, sample.OriginInfo.PodUID) + i, j = murmur3.SeedStringSum128(i, j, sample.OriginInfo.ContainerID) + i, _ = murmur3.SeedStringSum128(i, j, sample.OriginInfo.ContainerIDFromSocket) return fastrange(ckey.ContextKey(i), shards) } @@ -162,7 +162,7 @@ func newBatcher(demux aggregator.DemultiplexerWithAggregator, tlmChannel telemet } func getShardGenerator() shardKeyGenerator { - isolated := config.Datadog().GetString("dogstatsd_pipeline_autoadjust_strategy") == aggregator.AutoAdjustStrategyPerOrigin + isolated := pkgconfigsetup.Datadog().GetString("dogstatsd_pipeline_autoadjust_strategy") == aggregator.AutoAdjustStrategyPerOrigin base := shardKeyGeneratorBase{ keyGenerator: ckey.NewKeyGenerator(), diff --git a/comp/dogstatsd/server/enrich.go b/comp/dogstatsd/server/enrich.go index a5b2c04516e92..4441521b66bad 100644 --- a/comp/dogstatsd/server/enrich.go +++ b/comp/dogstatsd/server/enrich.go @@ -40,10 +40,10 @@ func extractTagsMetadata(tags []string, originFromUDS string, originFromMsg []by host := conf.defaultHostname metricSource := metrics.MetricSourceDogstatsd origin := taggertypes.OriginInfo{ - FromUDS: originFromUDS, - FromMsg: string(originFromMsg), - ExternalData: externalData, - ProductOrigin: taggertypes.ProductOriginDogStatsD, + ContainerIDFromSocket: originFromUDS, + ContainerID: string(originFromMsg), + ExternalData: externalData, + ProductOrigin: taggertypes.ProductOriginDogStatsD, } n := 0 @@ -52,7 +52,7 @@ func extractTagsMetadata(tags []string, originFromUDS string, originFromMsg []by host = tag[len(hostTagPrefix):] continue } else if strings.HasPrefix(tag, entityIDTagPrefix) { - origin.FromTag = tag[len(entityIDTagPrefix):] + origin.PodUID = tag[len(entityIDTagPrefix):] continue } else if strings.HasPrefix(tag, CardinalityTagPrefix) { origin.Cardinality = tag[len(CardinalityTagPrefix):] diff --git a/comp/dogstatsd/server/enrich_test.go b/comp/dogstatsd/server/enrich_test.go index f6eafda67c579..29b79597fac09 100644 --- a/comp/dogstatsd/server/enrich_test.go +++ b/comp/dogstatsd/server/enrich_test.go @@ -98,9 +98,9 @@ func TestConvertParseMultiple(t *testing.T) { assert.Equal(t, metricType, parsed[0].Mtype) assert.Equal(t, 0, len(parsed[0].Tags)) assert.Equal(t, "default-hostname", parsed[0].Host) - assert.Equal(t, "", parsed[0].OriginInfo.FromUDS) - assert.Equal(t, "", parsed[0].OriginInfo.FromTag) - assert.Equal(t, "", parsed[0].OriginInfo.FromMsg) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed[0].OriginInfo.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed[0].SampleRate, epsilon) assert.Equal(t, "daemon", parsed[1].Name) @@ -108,9 +108,9 @@ func TestConvertParseMultiple(t *testing.T) { assert.Equal(t, metricType, parsed[1].Mtype) assert.Equal(t, 0, len(parsed[1].Tags)) assert.Equal(t, "default-hostname", parsed[1].Host) - assert.Equal(t, "", parsed[0].OriginInfo.FromUDS) - assert.Equal(t, "", parsed[0].OriginInfo.FromTag) - assert.Equal(t, "", parsed[0].OriginInfo.FromMsg) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed[0].OriginInfo.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed[1].SampleRate, epsilon) } } @@ -132,9 +132,9 @@ 
func TestConvertParseSingle(t *testing.T) { assert.Equal(t, metricType, parsed[0].Mtype) assert.Equal(t, 0, len(parsed[0].Tags)) assert.Equal(t, "default-hostname", parsed[0].Host) - assert.Equal(t, "", parsed[0].OriginInfo.FromUDS) - assert.Equal(t, "", parsed[0].OriginInfo.FromTag) - assert.Equal(t, "", parsed[0].OriginInfo.FromMsg) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed[0].OriginInfo.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed[0].SampleRate, epsilon) } } @@ -158,9 +158,9 @@ func TestConvertParseSingleWithTags(t *testing.T) { assert.Equal(t, "protocol:http", parsed[0].Tags[0]) assert.Equal(t, "bench", parsed[0].Tags[1]) assert.Equal(t, "default-hostname", parsed[0].Host) - assert.Equal(t, "", parsed[0].OriginInfo.FromUDS) - assert.Equal(t, "", parsed[0].OriginInfo.FromTag) - assert.Equal(t, "", parsed[0].OriginInfo.FromMsg) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed[0].OriginInfo.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed[0].SampleRate, epsilon) } } @@ -184,9 +184,9 @@ func TestConvertParseSingleWithHostTags(t *testing.T) { assert.Equal(t, "protocol:http", parsed[0].Tags[0]) assert.Equal(t, "bench", parsed[0].Tags[1]) assert.Equal(t, "custom-host", parsed[0].Host) - assert.Equal(t, "", parsed[0].OriginInfo.FromUDS) - assert.Equal(t, "", parsed[0].OriginInfo.FromTag) - assert.Equal(t, "", parsed[0].OriginInfo.FromMsg) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed[0].OriginInfo.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed[0].SampleRate, epsilon) } } @@ -210,9 +210,9 @@ func TestConvertParseSingleWithEmptyHostTags(t *testing.T) { assert.Equal(t, "protocol:http", parsed[0].Tags[0]) assert.Equal(t, "bench", parsed[0].Tags[1]) assert.Equal(t, "", parsed[0].Host) - assert.Equal(t, "", parsed[0].OriginInfo.FromUDS) - assert.Equal(t, "", parsed[0].OriginInfo.FromTag) - assert.Equal(t, "", parsed[0].OriginInfo.FromMsg) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed[0].OriginInfo.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed[0].SampleRate, epsilon) } } @@ -234,9 +234,9 @@ func TestConvertParseSingleWithSampleRate(t *testing.T) { assert.Equal(t, metricType, parsed[0].Mtype) assert.Equal(t, 0, len(parsed[0].Tags)) assert.Equal(t, "default-hostname", parsed[0].Host) - assert.Equal(t, "", parsed[0].OriginInfo.FromUDS) - assert.Equal(t, "", parsed[0].OriginInfo.FromTag) - assert.Equal(t, "", parsed[0].OriginInfo.FromMsg) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed[0].OriginInfo.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) assert.InEpsilon(t, 0.21, parsed[0].SampleRate, epsilon) } } @@ -255,9 +255,9 @@ func TestConvertParseSet(t *testing.T) { assert.Equal(t, metrics.SetType, parsed.Mtype) assert.Equal(t, 0, len(parsed.Tags)) assert.Equal(t, "default-hostname", parsed.Host) - assert.Equal(t, "", parsed.OriginInfo.FromUDS) - assert.Equal(t, "", parsed.OriginInfo.FromTag) - assert.Equal(t, "", parsed.OriginInfo.FromMsg) + assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed.OriginInfo.PodUID) + assert.Equal(t, "", parsed.OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } 
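Reviewer note: the enrich.go and batch.go hunks above rename the OriginInfo fields (FromUDS -> ContainerIDFromSocket, FromTag -> PodUID, FromMsg -> ContainerID) and key the per-origin pipeline sharding on them. The standalone sketch below shows the hashing idea with the new field names; it assumes the murmur3 package in use is github.com/twmb/murmur3, whose SeedStringSum128 matches the calls in the hunk, and it substitutes a plain modulo for the agent's fastrange/ckey.ContextKey machinery.

// Standalone sketch of the per-origin shard selection introduced in batch.go.
// OriginInfo mirrors only the renamed fields relevant here.
package main

import (
	"fmt"

	"github.com/twmb/murmur3" // assumed to be the murmur3 package used by the agent
)

type OriginInfo struct {
	ContainerIDFromSocket string // was FromUDS
	PodUID                string // was FromTag
	ContainerID           string // was FromMsg
}

// shardForOrigin hashes the three origin fields so that all samples coming from
// the same workload land on the same pipeline shard.
func shardForOrigin(o OriginInfo, shards uint64) uint64 {
	i, j := uint64(0), uint64(0)
	i, j = murmur3.SeedStringSum128(i, j, o.PodUID)
	i, j = murmur3.SeedStringSum128(i, j, o.ContainerID)
	i, _ = murmur3.SeedStringSum128(i, j, o.ContainerIDFromSocket)
	return i % shards // the real code uses fastrange over ckey.ContextKey instead
}

func main() {
	o := OriginInfo{ContainerIDFromSocket: "originID", PodUID: "pod-uid", ContainerID: "container-id"}
	fmt.Println(shardForOrigin(o, 8))
}

Seeding the three fields in sequence means every sample carrying the same pod UID and container ID hashes to the same shard, so origin-scoped contexts stay on a single pipeline worker.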
@@ -275,9 +275,9 @@ func TestConvertParseSetUnicode(t *testing.T) { assert.Equal(t, metrics.SetType, parsed.Mtype) assert.Equal(t, 0, len(parsed.Tags)) assert.Equal(t, "default-hostname", parsed.Host) - assert.Equal(t, "", parsed.OriginInfo.FromUDS) - assert.Equal(t, "", parsed.OriginInfo.FromTag) - assert.Equal(t, "", parsed.OriginInfo.FromMsg) + assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed.OriginInfo.PodUID) + assert.Equal(t, "", parsed.OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -295,9 +295,9 @@ func TestConvertParseGaugeWithPoundOnly(t *testing.T) { assert.Equal(t, metrics.GaugeType, parsed.Mtype) assert.Equal(t, 0, len(parsed.Tags)) assert.Equal(t, "default-hostname", parsed.Host) - assert.Equal(t, "", parsed.OriginInfo.FromUDS) - assert.Equal(t, "", parsed.OriginInfo.FromTag) - assert.Equal(t, "", parsed.OriginInfo.FromMsg) + assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed.OriginInfo.PodUID) + assert.Equal(t, "", parsed.OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -316,9 +316,9 @@ func TestConvertParseGaugeWithUnicode(t *testing.T) { require.Equal(t, 1, len(parsed.Tags)) assert.Equal(t, "intitulé:T0µ", parsed.Tags[0]) assert.Equal(t, "default-hostname", parsed.Host) - assert.Equal(t, "", parsed.OriginInfo.FromUDS) - assert.Equal(t, "", parsed.OriginInfo.FromTag) - assert.Equal(t, "", parsed.OriginInfo.FromMsg) + assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed.OriginInfo.PodUID) + assert.Equal(t, "", parsed.OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -392,9 +392,9 @@ func TestConvertServiceCheckMinimal(t *testing.T) { assert.Equal(t, int64(0), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string(nil), sc.Tags) } @@ -439,9 +439,9 @@ func TestConvertServiceCheckMetadataTimestamp(t *testing.T) { assert.Equal(t, int64(21), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string(nil), sc.Tags) } @@ -457,9 +457,9 @@ func TestConvertServiceCheckMetadataHostname(t *testing.T) { assert.Equal(t, int64(0), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string(nil), sc.Tags) } @@ -475,9 +475,9 @@ func TestConvertServiceCheckMetadataHostnameInTag(t *testing.T) { assert.Equal(t, int64(0), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) - assert.Equal(t, "", 
sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string{}, sc.Tags) } @@ -493,9 +493,9 @@ func TestConvertServiceCheckMetadataEmptyHostTag(t *testing.T) { assert.Equal(t, int64(0), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string{"other:tag"}, sc.Tags) } @@ -511,9 +511,9 @@ func TestConvertServiceCheckMetadataTags(t *testing.T) { assert.Equal(t, int64(0), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string{"tag1", "tag2:test", "tag3"}, sc.Tags) } @@ -529,9 +529,9 @@ func TestConvertServiceCheckMetadataMessage(t *testing.T) { assert.Equal(t, int64(0), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "this is fine", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string(nil), sc.Tags) } @@ -547,9 +547,9 @@ func TestConvertServiceCheckMetadataMultiple(t *testing.T) { assert.Equal(t, int64(21), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "this is fine", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string{"tag1:test", "tag2"}, sc.Tags) // multiple time the same tag @@ -560,9 +560,9 @@ func TestConvertServiceCheckMetadataMultiple(t *testing.T) { assert.Equal(t, int64(22), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string(nil), sc.Tags) } @@ -577,9 +577,9 @@ func TestServiceCheckOriginTag(t *testing.T) { assert.Equal(t, int64(21), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "this is fine", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "testID", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "testID", sc.OriginInfo.PodUID) + assert.Equal(t, "", 
sc.OriginInfo.ContainerID) assert.Equal(t, []string{"tag1:test", "tag2"}, sc.Tags) } @@ -600,9 +600,9 @@ func TestConvertEventMinimal(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMultilinesText(t *testing.T) { @@ -622,9 +622,9 @@ func TestConvertEventMultilinesText(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventPipeInTitle(t *testing.T) { @@ -644,9 +644,9 @@ func TestConvertEventPipeInTitle(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventError(t *testing.T) { @@ -734,9 +734,9 @@ func TestConvertEventMetadataTimestamp(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataPriority(t *testing.T) { @@ -756,9 +756,9 @@ func TestConvertEventMetadataPriority(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataHostname(t *testing.T) { @@ -778,9 +778,9 @@ func TestConvertEventMetadataHostname(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataHostnameInTag(t *testing.T) { @@ -800,9 +800,9 @@ func TestConvertEventMetadataHostnameInTag(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) 
+ assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataEmptyHostTag(t *testing.T) { @@ -822,9 +822,9 @@ func TestConvertEventMetadataEmptyHostTag(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataAlertType(t *testing.T) { @@ -844,9 +844,9 @@ func TestConvertEventMetadataAlertType(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataAggregatioKey(t *testing.T) { @@ -866,9 +866,9 @@ func TestConvertEventMetadataAggregatioKey(t *testing.T) { assert.Equal(t, "some aggregation key", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataSourceType(t *testing.T) { @@ -888,9 +888,9 @@ func TestConvertEventMetadataSourceType(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "this is the source", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataTags(t *testing.T) { @@ -910,9 +910,9 @@ func TestConvertEventMetadataTags(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataMultiple(t *testing.T) { @@ -932,9 +932,9 @@ func TestConvertEventMetadataMultiple(t *testing.T) { assert.Equal(t, "aggKey", e.AggregationKey) assert.Equal(t, "source test", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestEventOriginTag(t *testing.T) { @@ -954,9 +954,9 @@ func TestEventOriginTag(t *testing.T) { assert.Equal(t, "aggKey", e.AggregationKey) assert.Equal(t, "source test", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) 
- assert.Equal(t, "testID", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "testID", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertNamespace(t *testing.T) { @@ -1062,9 +1062,9 @@ func TestConvertEntityOriginDetectionNoTags(t *testing.T) { assert.Equal(t, "sometag1:somevalue1", parsed.Tags[0]) assert.Equal(t, "sometag2:somevalue2", parsed.Tags[1]) assert.Equal(t, "my-hostname", parsed.Host) - assert.Equal(t, "", parsed.OriginInfo.FromUDS) - assert.Equal(t, "foo", parsed.OriginInfo.FromTag) - assert.Equal(t, "", parsed.OriginInfo.FromMsg) + assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "foo", parsed.OriginInfo.PodUID) + assert.Equal(t, "", parsed.OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -1081,9 +1081,9 @@ func TestConvertEntityOriginDetectionTags(t *testing.T) { require.Equal(t, 2, len(parsed.Tags)) assert.ElementsMatch(t, []string{"sometag1:somevalue1", "sometag2:somevalue2"}, parsed.Tags) assert.Equal(t, "my-hostname", parsed.Host) - assert.Equal(t, "", parsed.OriginInfo.FromUDS) - assert.Equal(t, "foo", parsed.OriginInfo.FromTag) - assert.Equal(t, "", parsed.OriginInfo.FromMsg) + assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "foo", parsed.OriginInfo.PodUID) + assert.Equal(t, "", parsed.OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -1101,9 +1101,9 @@ func TestConvertEntityOriginDetectionTagsError(t *testing.T) { assert.Equal(t, "sometag1:somevalue1", parsed.Tags[0]) assert.Equal(t, "sometag2:somevalue2", parsed.Tags[1]) assert.Equal(t, "my-hostname", parsed.Host) - assert.Equal(t, "", parsed.OriginInfo.FromUDS) - assert.Equal(t, "foo", parsed.OriginInfo.FromTag) - assert.Equal(t, "", parsed.OriginInfo.FromMsg) + assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "foo", parsed.OriginInfo.PodUID) + assert.Equal(t, "", parsed.OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -1151,7 +1151,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1167,7 +1167,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: nil, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1183,7 +1183,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromTag: "my-id"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "my-id"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1199,7 +1199,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromTag: "none"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "none"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1215,7 +1215,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: 
taggertypes.OriginInfo{FromUDS: "originID", FromTag: "42"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1231,7 +1231,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromTag: "42", Cardinality: "high"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42", Cardinality: "high"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1247,7 +1247,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromTag: "42", Cardinality: "orchestrator"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42", Cardinality: "orchestrator"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1263,7 +1263,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromTag: "42", Cardinality: "low"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42", Cardinality: "low"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1279,7 +1279,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromTag: "42", Cardinality: "unknown"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42", Cardinality: "unknown"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1295,7 +1295,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromTag: "42", Cardinality: ""}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42", Cardinality: ""}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1311,7 +1311,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromTag: "pod-uid", FromMsg: "container-id"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "pod-uid", ContainerID: "container-id"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1327,7 +1327,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromMsg: "container-id"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", ContainerID: "container-id"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1359,10 +1359,10 @@ func TestEnrichTags(t *testing.T) { wantedTags: []string{"env:prod"}, wantedHost: "foo", wantedOrigin: taggertypes.OriginInfo{ - FromUDS: "originID", - FromTag: "pod-uid", - FromMsg: "container-id", - ExternalData: "it-false,cn-container_name,pu-pod_uid", + ContainerIDFromSocket: "originID", + PodUID: "pod-uid", + ContainerID: "container-id", + ExternalData: "it-false,cn-container_name,pu-pod_uid", }, wantedMetricSource: metrics.MetricSourceDogstatsd, }, diff --git a/comp/dogstatsd/server/float64_list_pool.go b/comp/dogstatsd/server/float64_list_pool.go index c55859e78b772..fc4c870641e36 100644 --- a/comp/dogstatsd/server/float64_list_pool.go +++ 
b/comp/dogstatsd/server/float64_list_pool.go @@ -9,7 +9,7 @@ import ( "sync" "github.com/DataDog/datadog-agent/comp/core/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" ) @@ -37,7 +37,7 @@ func newFloat64ListPool(telemetrycomp telemetry.Component) *float64ListPool { }, }, // telemetry - tlmEnabled: utils.IsTelemetryEnabled(config.Datadog()), + tlmEnabled: utils.IsTelemetryEnabled(pkgconfigsetup.Datadog()), tlmFloat64ListPoolGet: telemetrycomp.NewCounter("dogstatsd", "float64_list_pool_get", nil, "Count of get done in the float64_list pool"), tlmFloat64ListPoolPut: telemetrycomp.NewCounter("dogstatsd", "float64_list_pool_put", diff --git a/comp/dogstatsd/server/parse.go b/comp/dogstatsd/server/parse.go index 6fe618a702267..3c15ad8b05a5b 100644 --- a/comp/dogstatsd/server/parse.go +++ b/comp/dogstatsd/server/parse.go @@ -13,7 +13,7 @@ import ( "unsafe" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -72,7 +72,7 @@ type parser struct { provider provider.Provider } -func newParser(cfg config.Reader, float64List *float64ListPool, workerNum int, wmeta optional.Option[workloadmeta.Component], stringInternerTelemetry *stringInternerTelemetry) *parser { +func newParser(cfg model.Reader, float64List *float64ListPool, workerNum int, wmeta optional.Option[workloadmeta.Component], stringInternerTelemetry *stringInternerTelemetry) *parser { stringInternerCacheSize := cfg.GetInt("dogstatsd_string_interner_size") readTimestamps := cfg.GetBool("dogstatsd_no_aggregation_pipeline") diff --git a/comp/dogstatsd/server/server.go b/comp/dogstatsd/server/server.go index cb57d37f193f7..059fdafdff191 100644 --- a/comp/dogstatsd/server/server.go +++ b/comp/dogstatsd/server/server.go @@ -30,6 +30,7 @@ import ( serverdebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/config/model" + "github.com/DataDog/datadog-agent/pkg/config/structure" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" @@ -835,7 +836,7 @@ func getBuckets(cfg model.Reader, logger log.Component, option string) []float64 func getDogstatsdMappingProfiles(cfg model.Reader) ([]mapper.MappingProfileConfig, error) { var mappings []mapper.MappingProfileConfig if cfg.IsSet("dogstatsd_mapper_profiles") { - err := cfg.UnmarshalKey("dogstatsd_mapper_profiles", &mappings) + err := structure.UnmarshalKey(cfg, "dogstatsd_mapper_profiles", &mappings) if err != nil { return []mapper.MappingProfileConfig{}, fmt.Errorf("Could not parse dogstatsd_mapper_profiles: %v", err) } diff --git a/comp/dogstatsd/server/server_bench_test.go b/comp/dogstatsd/server/server_bench_test.go index 3efdeb31adefd..368541c5829f4 100644 --- a/comp/dogstatsd/server/server_bench_test.go +++ b/comp/dogstatsd/server/server_bench_test.go @@ -11,8 +11,9 @@ import ( "testing" "time" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" + pkglogsetup 
"github.com/DataDog/datadog-agent/pkg/util/log/setup" "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" ) @@ -34,7 +35,7 @@ func benchParsePackets(b *testing.B, rawPacket []byte) { deps := fulfillDeps(b) s := deps.Server.(*server) // our logger will log dogstatsd packet by default if nothing is setup - pkgconfig.SetupLogger("", "off", "", "", false, true, false) + pkglogsetup.SetupLogger("", "off", "", "", false, true, false, pkgconfigsetup.Datadog()) histogram := deps.Telemetry.NewHistogram("test-dogstatsd", "channel_latency", @@ -87,7 +88,7 @@ func BenchmarkPbarseMetricMessage(b *testing.B) { deps := fulfillDeps(b) s := deps.Server.(*server) // our logger will log dogstatsd packet by default if nothing is setup - pkgconfig.SetupLogger("", "off", "", "", false, true, false) + pkglogsetup.SetupLogger("", "off", "", "", false, true, false, pkgconfigsetup.Datadog()) demux := deps.Demultiplexer @@ -139,7 +140,7 @@ func benchmarkMapperControl(b *testing.B, yaml string) { s := deps.Server.(*server) // our logger will log dogstatsd packet by default if nothing is setup - pkgconfig.SetupLogger("", "off", "", "", false, true, false) + pkglogsetup.SetupLogger("", "off", "", "", false, true, false, pkgconfigsetup.Datadog()) demux := deps.Demultiplexer diff --git a/comp/dogstatsd/server/server_test.go b/comp/dogstatsd/server/server_test.go index e60036ae2a5fd..53c865af4a654 100644 --- a/comp/dogstatsd/server/server_test.go +++ b/comp/dogstatsd/server/server_test.go @@ -44,9 +44,9 @@ import ( serverdebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug/serverdebugimpl" "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -743,7 +743,7 @@ func TestNoMappingsConfig(t *testing.T) { cfg["dogstatsd_port"] = listeners.RandomPortName deps := fulfillDepsWithConfigOverride(t, cfg) s := deps.Server.(*server) - cw := deps.Config.(config.Writer) + cw := deps.Config.(model.Writer) cw.SetWithoutSource("dogstatsd_port", listeners.RandomPortName) samples := []metrics.MetricSample{} @@ -1147,19 +1147,19 @@ func testContainerIDParsing(t *testing.T, cfg map[string]interface{}) { metrics, err := s.parseMetricMessage(nil, parser, []byte("metric.name:123|g|c:metric-container"), "", "", false) assert.NoError(err) assert.Len(metrics, 1) - assert.Equal("metric-container", metrics[0].OriginInfo.FromMsg) + assert.Equal("metric-container", metrics[0].OriginInfo.ContainerID) // Event event, err := s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container"), "") assert.NoError(err) assert.NotNil(event) - assert.Equal("event-container", event.OriginInfo.FromMsg) + assert.Equal("event-container", event.OriginInfo.ContainerID) // Service check serviceCheck, err := s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container"), "") assert.NoError(err) assert.NotNil(serviceCheck) - assert.Equal("service-check-container", serviceCheck.OriginInfo.FromMsg) + assert.Equal("service-check-container", serviceCheck.OriginInfo.ContainerID) } func TestContainerIDParsing(t *testing.T) { @@ -1191,19 +1191,19 @@ func 
TestOrigin(t *testing.T) { metrics, err := s.parseMetricMessage(nil, parser, []byte("metric.name:123|g|c:metric-container|#dd.internal.card:none"), "", "", false) assert.NoError(err) assert.Len(metrics, 1) - assert.Equal("metric-container", metrics[0].OriginInfo.FromMsg) + assert.Equal("metric-container", metrics[0].OriginInfo.ContainerID) // Event event, err := s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container|#dd.internal.card:none"), "") assert.NoError(err) assert.NotNil(event) - assert.Equal("event-container", event.OriginInfo.FromMsg) + assert.Equal("event-container", event.OriginInfo.ContainerID) // Service check serviceCheck, err := s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container|#dd.internal.card:none"), "") assert.NoError(err) assert.NotNil(serviceCheck) - assert.Equal("service-check-container", serviceCheck.OriginInfo.FromMsg) + assert.Equal("service-check-container", serviceCheck.OriginInfo.ContainerID) }) } diff --git a/comp/dogstatsd/server/serverless.go b/comp/dogstatsd/server/serverless.go index 4995ade81b07e..5426c4f2132c1 100644 --- a/comp/dogstatsd/server/serverless.go +++ b/comp/dogstatsd/server/serverless.go @@ -16,7 +16,7 @@ import ( replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/impl-noop" "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug/serverdebugimpl" "github.com/DataDog/datadog-agent/pkg/aggregator" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -31,7 +31,7 @@ type ServerlessDogstatsd interface { //nolint:revive // TODO(AML) Fix revive linter func NewServerlessServer(demux aggregator.Demultiplexer) (ServerlessDogstatsd, error) { wmeta := optional.NewNoneOption[workloadmeta.Component]() - s := newServerCompat(config.Datadog(), logComponentImpl.NewTemporaryLoggerWithoutInit(), replay.NewNoopTrafficCapture(), serverdebugimpl.NewServerlessServerDebug(), true, demux, wmeta, pidmapimpl.NewServerlessPidMap(), telemetry.GetCompatComponent()) + s := newServerCompat(pkgconfigsetup.Datadog(), logComponentImpl.NewTemporaryLoggerWithoutInit(), replay.NewNoopTrafficCapture(), serverdebugimpl.NewServerlessServerDebug(), true, demux, wmeta, pidmapimpl.NewServerlessPidMap(), telemetry.GetCompatComponent()) err := s.start(context.TODO()) if err != nil { diff --git a/comp/dogstatsd/serverDebug/serverdebugimpl/debug.go b/comp/dogstatsd/serverDebug/serverdebugimpl/debug.go index 887a2dd517f73..cfc062f825108 100644 --- a/comp/dogstatsd/serverDebug/serverdebugimpl/debug.go +++ b/comp/dogstatsd/serverDebug/serverdebugimpl/debug.go @@ -26,10 +26,12 @@ import ( logComponentImpl "github.com/DataDog/datadog-agent/comp/core/log/impl" serverdebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" "github.com/DataDog/datadog-agent/pkg/aggregator/ckey" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) // Module defines the fx options for this component. 
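Reviewer note: mapper_test.go and server.go above switch from cfg.UnmarshalKey to the free function structure.UnmarshalKey(cfg, key, &target), decoupling struct decoding from the config component. Below is a minimal test-style sketch of that call shape, reusing NewMockFromYAML as in the mapper test; the MappingProfile struct and its mapstructure tags are illustrative assumptions, not the real mapper.MappingProfileConfig.

// Test-style sketch of the structure.UnmarshalKey usage adopted above.
package mapper

import (
	"testing"

	"github.com/stretchr/testify/require"

	configComponent "github.com/DataDog/datadog-agent/comp/core/config"
	"github.com/DataDog/datadog-agent/pkg/config/structure"
)

// MappingProfile is an invented stand-in for mapper.MappingProfileConfig;
// field tags are an assumption about how the structure package matches keys.
type MappingProfile struct {
	Name   string `mapstructure:"name"`
	Prefix string `mapstructure:"prefix"`
}

func TestUnmarshalMapperProfiles(t *testing.T) {
	cfg := configComponent.NewMockFromYAML(t, `
dogstatsd_mapper_profiles:
  - name: airflow
    prefix: airflow.
`)
	var profiles []MappingProfile
	// structure.UnmarshalKey decodes a config subtree without going through the
	// config component's own UnmarshalKey method.
	require.NoError(t, structure.UnmarshalKey(cfg, "dogstatsd_mapper_profiles", &profiles))
	require.Len(t, profiles, 1)
	require.Equal(t, "airflow", profiles[0].Name)
}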
@@ -74,7 +76,7 @@ type serverDebugImpl struct { // NewServerlessServerDebug creates a new instance of serverDebug.Component func NewServerlessServerDebug() serverdebug.Component { - return newServerDebugCompat(logComponentImpl.NewTemporaryLoggerWithoutInit(), config.Datadog()) + return newServerDebugCompat(logComponentImpl.NewTemporaryLoggerWithoutInit(), pkgconfigsetup.Datadog()) } // newServerDebug creates a new instance of a ServerDebug @@ -82,7 +84,7 @@ func newServerDebug(deps dependencies) serverdebug.Component { return newServerDebugCompat(deps.Log, deps.Config) } -func newServerDebugCompat(l log.Component, cfg config.Reader) serverdebug.Component { +func newServerDebugCompat(l log.Component, cfg model.Reader) serverdebug.Component { sd := &serverDebugImpl{ log: l, enabled: atomic.NewBool(false), @@ -277,7 +279,7 @@ func (d *serverDebugImpl) disableMetricsStats() { } // build a local dogstatsd logger and bubbling up any errors -func (d *serverDebugImpl) getDogstatsdDebug(cfg config.Reader) slog.LoggerInterface { +func (d *serverDebugImpl) getDogstatsdDebug(cfg model.Reader) slog.LoggerInterface { var dogstatsdLogger slog.LoggerInterface @@ -289,7 +291,7 @@ func (d *serverDebugImpl) getDogstatsdDebug(cfg config.Reader) slog.LoggerInterf // Set up dogstatsdLogger if cfg.GetBool("dogstatsd_logging_enabled") { - logger, e := config.SetupDogstatsdLogger(logFile) + logger, e := pkglogsetup.SetupDogstatsdLogger(logFile, pkgconfigsetup.Datadog()) if e != nil { // use component logger instead of global logger. d.log.Errorf("Unable to set up Dogstatsd logger: %v. || Please reach out to Datadog support at https://docs.datadoghq.com/help/ ", e) diff --git a/comp/forwarder/defaultforwarder/forwarder.go b/comp/forwarder/defaultforwarder/forwarder.go index 9769bbaeb2ab0..64cc62cd636df 100644 --- a/comp/forwarder/defaultforwarder/forwarder.go +++ b/comp/forwarder/defaultforwarder/forwarder.go @@ -33,16 +33,23 @@ type provides struct { } func newForwarder(dep dependencies) provides { + if dep.Params.useNoopForwarder { + return provides{ + Comp: NoopForwarder{}, + } + } + options := createOptions(dep.Params, dep.Config, dep.Log) - return NewForwarder(dep.Config, dep.Log, dep.Lc, true, options, dep.Params.useNoopForwarder) + + return NewForwarder(dep.Config, dep.Log, dep.Lc, true, options) } func createOptions(params Params, config config.Component, log log.Component) *Options { var options *Options + keysPerDomain := getMultipleEndpoints(config, log) if !params.withResolver { - options = NewOptions(config, log, getMultipleEndpoints(config, log)) + options = NewOptions(config, log, keysPerDomain) } else { - keysPerDomain := getMultipleEndpoints(config, log) options = NewOptionsWithResolvers(config, log, resolver.NewSingleDomainResolvers(keysPerDomain)) } // Override the DisableAPIKeyChecking only if WithFeatures was called @@ -66,12 +73,7 @@ func getMultipleEndpoints(config config.Component, log log.Component) map[string // NewForwarder returns a new forwarder component. 
// //nolint:revive -func NewForwarder(config config.Component, log log.Component, lc fx.Lifecycle, ignoreLifeCycleError bool, options *Options, useNoopForwarder bool) provides { - if useNoopForwarder { - return provides{ - Comp: NoopForwarder{}, - } - } +func NewForwarder(config config.Component, log log.Component, lc fx.Lifecycle, ignoreLifeCycleError bool, options *Options) provides { forwarder := NewDefaultForwarder(config, log, options) lc.Append(fx.Hook{ diff --git a/comp/forwarder/defaultforwarder/forwarder_health.go b/comp/forwarder/defaultforwarder/forwarder_health.go index fbd29370ec5cf..d046f72de5e45 100644 --- a/comp/forwarder/defaultforwarder/forwarder_health.go +++ b/comp/forwarder/defaultforwarder/forwarder_health.go @@ -149,15 +149,28 @@ func (fh *forwarderHealth) healthCheckLoop() { } for { - select { - case <-fh.stop: - return - case <-validateTicker.C: - valid := fh.checkValidAPIKey() - if !valid { - fh.log.Errorf("No valid api key found, reporting the forwarder as unhealthy.") + // only read from the health channel if the api key is valid + if valid { + select { + case <-fh.stop: + return + case <-validateTicker.C: + valid = fh.checkValidAPIKey() + if !valid { + fh.log.Errorf("No valid api key found, reporting the forwarder as unhealthy.") + } + case <-fh.health.C: + } + } else { + select { + case <-fh.stop: + return + case <-validateTicker.C: + valid = fh.checkValidAPIKey() + if !valid { + fh.log.Errorf("No valid api key found, reporting the forwarder as unhealthy.") + } } - case <-fh.health.C: } } } diff --git a/comp/forwarder/defaultforwarder/go.mod b/comp/forwarder/defaultforwarder/go.mod index 38c5179b0f7e9..322e697c6969b 100644 --- a/comp/forwarder/defaultforwarder/go.mod +++ b/comp/forwarder/defaultforwarder/go.mod @@ -19,7 +19,9 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/obfuscate => ../../../pkg/obfuscate github.com/DataDog/datadog-agent/pkg/orchestrator/model => ../../../pkg/orchestrator/model @@ -73,7 +75,7 @@ require ( github.com/stretchr/testify v1.9.0 go.uber.org/atomic v1.11.0 go.uber.org/fx v1.22.2 - golang.org/x/text v0.17.0 + golang.org/x/text v0.18.0 ) require ( @@ -84,6 +86,8 @@ require ( github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect @@ -143,12 +147,12 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // 
indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.28.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.29.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/tools v0.25.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/comp/forwarder/defaultforwarder/go.sum b/comp/forwarder/defaultforwarder/go.sum index 1a380bd019cc6..bb4c7a3f4b025 100644 --- a/comp/forwarder/defaultforwarder/go.sum +++ b/comp/forwarder/defaultforwarder/go.sum @@ -284,15 +284,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -305,8 +305,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod 
h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -332,11 +332,11 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -349,8 +349,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go b/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go index b4432b81ffc7f..7e0f51f509689 100644 --- a/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go +++ b/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go @@ -20,7 +20,8 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/eventplatformreceiver" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatformreceiver/eventplatformreceiverimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" 
"github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/client" @@ -58,7 +59,7 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ // raise the default batch_max_concurrent_send from 0 to 10 to ensure this pipeline is able to handle 4k events/s defaultBatchMaxConcurrentSend: 10, defaultBatchMaxContentSize: 10e6, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, // High input chan size is needed to handle high number of DBM events being flushed by DBM integrations defaultInputChanSize: 500, }, @@ -72,7 +73,7 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ // raise the default batch_max_concurrent_send from 0 to 10 to ensure this pipeline is able to handle 4k events/s defaultBatchMaxConcurrentSend: 10, defaultBatchMaxContentSize: 20e6, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, // High input chan size is needed to handle high number of DBM events being flushed by DBM integrations defaultInputChanSize: 500, }, @@ -89,7 +90,7 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ // raise the default batch_max_concurrent_send from 0 to 10 to ensure this pipeline is able to handle 4k events/s defaultBatchMaxConcurrentSend: 10, defaultBatchMaxContentSize: 20e6, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, // High input chan size is needed to handle high number of DBM events being flushed by DBM integrations defaultInputChanSize: 500, }, @@ -103,7 +104,7 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ // raise the default batch_max_concurrent_send from 0 to 10 to ensure this pipeline is able to handle 4k events/s defaultBatchMaxConcurrentSend: 10, defaultBatchMaxContentSize: 20e6, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, // High input chan size is needed to handle high number of DBM events being flushed by DBM integrations defaultInputChanSize: 500, }, @@ -115,9 +116,9 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ hostnameEndpointPrefix: "ndm-intake.", intakeTrackType: "ndm", defaultBatchMaxConcurrentSend: 10, - defaultBatchMaxContentSize: pkgconfig.DefaultBatchMaxContentSize, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, - defaultInputChanSize: pkgconfig.DefaultInputChanSize, + defaultBatchMaxContentSize: pkgconfigsetup.DefaultBatchMaxContentSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, + defaultInputChanSize: pkgconfigsetup.DefaultInputChanSize, }, { eventType: eventplatform.EventTypeSnmpTraps, @@ -127,9 +128,9 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ hostnameEndpointPrefix: "snmp-traps-intake.", intakeTrackType: "ndmtraps", defaultBatchMaxConcurrentSend: 10, - defaultBatchMaxContentSize: pkgconfig.DefaultBatchMaxContentSize, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, - defaultInputChanSize: pkgconfig.DefaultInputChanSize, + defaultBatchMaxContentSize: pkgconfigsetup.DefaultBatchMaxContentSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, + defaultInputChanSize: pkgconfigsetup.DefaultInputChanSize, }, { eventType: eventplatform.EventTypeNetworkDevicesNetFlow, @@ -139,7 +140,7 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ hostnameEndpointPrefix: "ndmflow-intake.", intakeTrackType: "ndmflow", 
defaultBatchMaxConcurrentSend: 10, - defaultBatchMaxContentSize: pkgconfig.DefaultBatchMaxContentSize, + defaultBatchMaxContentSize: pkgconfigsetup.DefaultBatchMaxContentSize, // Each NetFlow flow is about 500 bytes // 10k BatchMaxSize is about 5Mo of content size @@ -162,9 +163,9 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ hostnameEndpointPrefix: "netpath-intake.", intakeTrackType: "netpath", defaultBatchMaxConcurrentSend: 10, - defaultBatchMaxContentSize: pkgconfig.DefaultBatchMaxContentSize, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, - defaultInputChanSize: pkgconfig.DefaultInputChanSize, + defaultBatchMaxContentSize: pkgconfigsetup.DefaultBatchMaxContentSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, + defaultInputChanSize: pkgconfigsetup.DefaultInputChanSize, }, { eventType: eventplatform.EventTypeContainerLifecycle, @@ -174,9 +175,9 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ hostnameEndpointPrefix: "contlcycle-intake.", intakeTrackType: "contlcycle", defaultBatchMaxConcurrentSend: 10, - defaultBatchMaxContentSize: pkgconfig.DefaultBatchMaxContentSize, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, - defaultInputChanSize: pkgconfig.DefaultInputChanSize, + defaultBatchMaxContentSize: pkgconfigsetup.DefaultBatchMaxContentSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, + defaultInputChanSize: pkgconfigsetup.DefaultInputChanSize, }, { eventType: eventplatform.EventTypeContainerImages, @@ -186,9 +187,9 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ hostnameEndpointPrefix: "contimage-intake.", intakeTrackType: "contimage", defaultBatchMaxConcurrentSend: 10, - defaultBatchMaxContentSize: pkgconfig.DefaultBatchMaxContentSize, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, - defaultInputChanSize: pkgconfig.DefaultInputChanSize, + defaultBatchMaxContentSize: pkgconfigsetup.DefaultBatchMaxContentSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, + defaultInputChanSize: pkgconfigsetup.DefaultInputChanSize, }, { eventType: eventplatform.EventTypeContainerSBOM, @@ -198,9 +199,9 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ hostnameEndpointPrefix: "sbom-intake.", intakeTrackType: "sbom", defaultBatchMaxConcurrentSend: 10, - defaultBatchMaxContentSize: pkgconfig.DefaultBatchMaxContentSize, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, - defaultInputChanSize: pkgconfig.DefaultInputChanSize, + defaultBatchMaxContentSize: pkgconfigsetup.DefaultBatchMaxContentSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, + defaultInputChanSize: pkgconfigsetup.DefaultInputChanSize, }, { eventType: eventplatform.EventTypeServiceDiscovery, @@ -210,9 +211,9 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ hostnameEndpointPrefix: "instrumentation-telemetry-intake.", intakeTrackType: "apmtelemetry", defaultBatchMaxConcurrentSend: 10, - defaultBatchMaxContentSize: pkgconfig.DefaultBatchMaxContentSize, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, - defaultInputChanSize: pkgconfig.DefaultInputChanSize, + defaultBatchMaxContentSize: pkgconfigsetup.DefaultBatchMaxContentSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, + defaultInputChanSize: pkgconfigsetup.DefaultInputChanSize, }, } @@ -246,8 +247,8 @@ func Diagnose() []diagnosis.Diagnosis { var diagnoses []diagnosis.Diagnosis for _, desc := range passthroughPipelineDescs { - configKeys := config.NewLogsConfigKeys(desc.endpointsConfigPrefix, pkgconfig.Datadog()) - endpoints, err := 
config.BuildHTTPEndpointsWithConfig(pkgconfig.Datadog(), configKeys, desc.hostnameEndpointPrefix, desc.intakeTrackType, config.DefaultIntakeProtocol, config.DefaultIntakeOrigin) + configKeys := config.NewLogsConfigKeys(desc.endpointsConfigPrefix, pkgconfigsetup.Datadog()) + endpoints, err := config.BuildHTTPEndpointsWithConfig(pkgconfigsetup.Datadog(), configKeys, desc.hostnameEndpointPrefix, desc.intakeTrackType, config.DefaultIntakeProtocol, config.DefaultIntakeOrigin) if err != nil { diagnoses = append(diagnoses, diagnosis.Diagnosis{ Result: diagnosis.DiagnosisFail, @@ -259,7 +260,7 @@ func Diagnose() []diagnosis.Diagnosis { continue } - url, err := logshttp.CheckConnectivityDiagnose(endpoints.Main, pkgconfig.Datadog()) + url, err := logshttp.CheckConnectivityDiagnose(endpoints.Main, pkgconfigsetup.Datadog()) name := fmt.Sprintf("Connectivity to %s", url) if err == nil { diagnoses = append(diagnoses, diagnosis.Diagnosis{ @@ -370,9 +371,9 @@ type passthroughPipelineDesc struct { // newHTTPPassthroughPipeline creates a new HTTP-only event platform pipeline that sends messages directly to intake // without any of the processing that exists in regular logs pipelines. -func newHTTPPassthroughPipeline(coreConfig pkgconfig.Reader, eventPlatformReceiver eventplatformreceiver.Component, desc passthroughPipelineDesc, destinationsContext *client.DestinationsContext, pipelineID int) (p *passthroughPipeline, err error) { - configKeys := config.NewLogsConfigKeys(desc.endpointsConfigPrefix, pkgconfig.Datadog()) - endpoints, err := config.BuildHTTPEndpointsWithConfig(pkgconfig.Datadog(), configKeys, desc.hostnameEndpointPrefix, desc.intakeTrackType, config.DefaultIntakeProtocol, config.DefaultIntakeOrigin) +func newHTTPPassthroughPipeline(coreConfig model.Reader, eventPlatformReceiver eventplatformreceiver.Component, desc passthroughPipelineDesc, destinationsContext *client.DestinationsContext, pipelineID int) (p *passthroughPipeline, err error) { + configKeys := config.NewLogsConfigKeys(desc.endpointsConfigPrefix, pkgconfigsetup.Datadog()) + endpoints, err := config.BuildHTTPEndpointsWithConfig(pkgconfigsetup.Datadog(), configKeys, desc.hostnameEndpointPrefix, desc.intakeTrackType, config.DefaultIntakeProtocol, config.DefaultIntakeOrigin) if err != nil { return nil, err } @@ -383,24 +384,24 @@ func newHTTPPassthroughPipeline(coreConfig pkgconfig.Reader, eventPlatformReceiv if endpoints.BatchMaxConcurrentSend <= 0 { endpoints.BatchMaxConcurrentSend = desc.defaultBatchMaxConcurrentSend } - if endpoints.BatchMaxContentSize <= pkgconfig.DefaultBatchMaxContentSize { + if endpoints.BatchMaxContentSize <= pkgconfigsetup.DefaultBatchMaxContentSize { endpoints.BatchMaxContentSize = desc.defaultBatchMaxContentSize } - if endpoints.BatchMaxSize <= pkgconfig.DefaultBatchMaxSize { + if endpoints.BatchMaxSize <= pkgconfigsetup.DefaultBatchMaxSize { endpoints.BatchMaxSize = desc.defaultBatchMaxSize } - if endpoints.InputChanSize <= pkgconfig.DefaultInputChanSize { + if endpoints.InputChanSize <= pkgconfigsetup.DefaultInputChanSize { endpoints.InputChanSize = desc.defaultInputChanSize } reliable := []client.Destination{} for i, endpoint := range endpoints.GetReliableEndpoints() { telemetryName := fmt.Sprintf("%s_%d_reliable_%d", desc.eventType, pipelineID, i) - reliable = append(reliable, logshttp.NewDestination(endpoint, desc.contentType, destinationsContext, endpoints.BatchMaxConcurrentSend, true, telemetryName, pkgconfig.Datadog())) + reliable = append(reliable, logshttp.NewDestination(endpoint, desc.contentType, 
destinationsContext, endpoints.BatchMaxConcurrentSend, true, telemetryName, pkgconfigsetup.Datadog())) } additionals := []client.Destination{} for i, endpoint := range endpoints.GetUnReliableEndpoints() { telemetryName := fmt.Sprintf("%s_%d_unreliable_%d", desc.eventType, pipelineID, i) - additionals = append(additionals, logshttp.NewDestination(endpoint, desc.contentType, destinationsContext, endpoints.BatchMaxConcurrentSend, false, telemetryName, pkgconfig.Datadog())) + additionals = append(additionals, logshttp.NewDestination(endpoint, desc.contentType, destinationsContext, endpoints.BatchMaxConcurrentSend, false, telemetryName, pkgconfigsetup.Datadog())) } destinations := client.NewDestinations(reliable, additionals) inputChan := make(chan *message.Message, endpoints.InputChanSize) @@ -464,7 +465,7 @@ func joinHosts(endpoints []config.Endpoint) string { return strings.Join(additionalHosts, ",") } -func newDefaultEventPlatformForwarder(config pkgconfig.Reader, eventPlatformReceiver eventplatformreceiver.Component) *defaultEventPlatformForwarder { +func newDefaultEventPlatformForwarder(config model.Reader, eventPlatformReceiver eventplatformreceiver.Component) *defaultEventPlatformForwarder { destinationsCtx := client.NewDestinationsContext() destinationsCtx.Start() pipelines := make(map[string]*passthroughPipeline) @@ -523,7 +524,7 @@ func NewNoopEventPlatformForwarder(hostname hostnameinterface.Component) eventpl } func newNoopEventPlatformForwarder(hostname hostnameinterface.Component) *defaultEventPlatformForwarder { - f := newDefaultEventPlatformForwarder(pkgconfig.Datadog(), eventplatformreceiverimpl.NewReceiver(hostname).Comp) + f := newDefaultEventPlatformForwarder(pkgconfigsetup.Datadog(), eventplatformreceiverimpl.NewReceiver(hostname).Comp) // remove the senders for _, p := range f.pipelines { p.strategy = nil diff --git a/comp/forwarder/orchestrator/orchestratorimpl/forwarder_no_orchestrator.go b/comp/forwarder/orchestrator/orchestratorimpl/forwarder_no_orchestrator.go index 151f85d2a7061..a656b5e4b04cc 100644 --- a/comp/forwarder/orchestrator/orchestratorimpl/forwarder_no_orchestrator.go +++ b/comp/forwarder/orchestrator/orchestratorimpl/forwarder_no_orchestrator.go @@ -20,9 +20,10 @@ import ( ) // Module defines the fx options for this component. -func Module() fxutil.Module { +func Module(params Params) fxutil.Module { return fxutil.Component( - fx.Provide(newOrchestratorForwarder)) + fx.Provide(newOrchestratorForwarder), + fx.Supply(params)) } // newOrchestratorForwarder builds the orchestrator forwarder. diff --git a/comp/forwarder/orchestrator/orchestratorimpl/forwarder_orchestrator.go b/comp/forwarder/orchestrator/orchestratorimpl/forwarder_orchestrator.go index ba079b7bb4c2b..7fedf5deeece4 100644 --- a/comp/forwarder/orchestrator/orchestratorimpl/forwarder_orchestrator.go +++ b/comp/forwarder/orchestrator/orchestratorimpl/forwarder_orchestrator.go @@ -25,9 +25,10 @@ import ( ) // Module defines the fx options for this component. 
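Both orchestrator Module hunks in this part of the diff change Module() to Module(params) and add fx.Supply(params) alongside fx.Provide, so the constructor can receive the params value as an ordinary dependency instead of reaching for globals. The snippet below is a minimal, hypothetical sketch of that fx.Supply pattern using go.uber.org/fx directly; the Params and Forwarder types and newForwarder are illustrative stand-ins, not the agent's real orchestrator types.

```go
// Sketch only: illustrates the fx.Supply(params) pattern used by Module(params).
package main

import (
	"fmt"

	"go.uber.org/fx"
)

// Params is a hypothetical options struct, analogous to the params value
// supplied by Module(params).
type Params struct {
	Enabled bool
}

type Forwarder struct {
	enabled bool
}

// newForwarder receives Params as an ordinary dependency because a value of
// that type was placed into the graph with fx.Supply.
func newForwarder(p Params) *Forwarder {
	return &Forwarder{enabled: p.Enabled}
}

func main() {
	fx.New(
		fx.Supply(Params{Enabled: true}), // same role as fx.Supply(params) in Module(params)
		fx.Provide(newForwarder),
		fx.Invoke(func(f *Forwarder) { fmt.Println("forwarder enabled:", f.enabled) }),
	)
}
```

fx.Invoke functions run while the graph is built, so the sketch prints the supplied flag without needing app.Run; the wiring is the same idea Module(params) relies on.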
-func Module() fxutil.Module { +func Module(params Params) fxutil.Module { return fxutil.Component( - fx.Provide(newOrchestratorForwarder)) + fx.Provide(newOrchestratorForwarder), + fx.Supply(params)) } // newOrchestratorForwarder returns an orchestratorForwarder diff --git a/comp/forwarder/orchestrator/orchestratorinterface/go.mod b/comp/forwarder/orchestrator/orchestratorinterface/go.mod index f8417fb044dc5..1aba5a59e8179 100644 --- a/comp/forwarder/orchestrator/orchestratorinterface/go.mod +++ b/comp/forwarder/orchestrator/orchestratorinterface/go.mod @@ -20,7 +20,9 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/obfuscate => ../../../../pkg/obfuscate github.com/DataDog/datadog-agent/pkg/orchestrator/model => ../../../../pkg/orchestrator/model @@ -74,7 +76,9 @@ require ( github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/orchestrator/model v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 // indirect @@ -150,13 +154,13 @@ require ( go.uber.org/fx v1.22.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.28.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.29.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/tools v0.25.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/comp/forwarder/orchestrator/orchestratorinterface/go.sum b/comp/forwarder/orchestrator/orchestratorinterface/go.sum index 01bf30ced87c1..b62c122e6e64d 100644 --- a/comp/forwarder/orchestrator/orchestratorinterface/go.sum +++ b/comp/forwarder/orchestrator/orchestratorinterface/go.sum @@ -280,15 +280,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -301,8 +301,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -328,11 +328,11 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text 
v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -345,8 +345,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/comp/logs/agent/agentimpl/agent.go b/comp/logs/agent/agentimpl/agent.go index b5626e3d9b54b..0849d55b2b7ef 100644 --- a/comp/logs/agent/agentimpl/agent.go +++ b/comp/logs/agent/agentimpl/agent.go @@ -31,7 +31,7 @@ import ( integrationsimpl "github.com/DataDog/datadog-agent/comp/logs/integrations/impl" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" rctypes "github.com/DataDog/datadog-agent/comp/remote-config/rcclient/types" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/client" "github.com/DataDog/datadog-agent/pkg/logs/diagnostic" @@ -99,7 +99,7 @@ type provides struct { // a description of its operation. 
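A pattern that repeats through the remaining hunks (here in comp/logs/agent/agentimpl/agent.go, and later in the hosttags, host utils, and inventory code) is swapping the monolithic pkg/config import for the narrower pkg/config/model Reader, with pkg/config/setup's Datadog() used where a concrete instance is still needed. As a hedged illustration of why the narrow interface helps, here is a small self-contained sketch; the Reader method set and the in-memory fake are assumptions for the example, not the actual model.Reader API.

```go
// Sketch only: shows the shape of the pkg/config -> pkg/config/model migration
// in these hunks. Reader below is a local, illustrative subset (GetBool and
// IsSet appear in the diff); settingsFake is a hypothetical test double.
package main

import "fmt"

// Reader is the narrow surface a component declares instead of depending on
// the whole config package.
type Reader interface {
	GetBool(key string) bool
	IsSet(key string) bool
}

// settingsFake is a trivial in-memory implementation, the kind of double the
// narrower interface makes easy to write in unit tests.
type settingsFake map[string]bool

func (s settingsFake) GetBool(key string) bool { return s[key] }
func (s settingsFake) IsSet(key string) bool   { _, ok := s[key]; return ok }

// autoMultiLineEnabled mirrors a single-flag read such as
// conf.GetBool("logs_config.auto_multi_line_detection").
func autoMultiLineEnabled(conf Reader) bool {
	return conf.GetBool("logs_config.auto_multi_line_detection")
}

func main() {
	conf := settingsFake{"logs_config.auto_multi_line_detection": true}
	fmt.Println(autoMultiLineEnabled(conf)) // prints: true
}
```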
type logAgent struct { log log.Component - config pkgConfig.Reader + config model.Reader inventoryAgent inventoryagent.Component hostname hostname.Component tagger tagger.Component diff --git a/comp/logs/agent/agentimpl/agent_core_init.go b/comp/logs/agent/agentimpl/agent_core_init.go index 903fbbce2da63..c05729c40d0d6 100644 --- a/comp/logs/agent/agentimpl/agent_core_init.go +++ b/comp/logs/agent/agentimpl/agent_core_init.go @@ -13,7 +13,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/logs/agent/config" integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/client" "github.com/DataDog/datadog-agent/pkg/logs/client/http" @@ -79,7 +79,7 @@ func (a *logAgent) SetupPipeline(processingRules []*config.ProcessingRule, wmeta } // buildEndpoints builds endpoints for the logs agent -func buildEndpoints(coreConfig pkgConfig.Reader) (*config.Endpoints, error) { +func buildEndpoints(coreConfig model.Reader) (*config.Endpoints, error) { httpConnectivity := config.HTTPConnectivityFailure if endpoints, err := config.BuildHTTPEndpointsWithVectorOverride(coreConfig, intakeTrackType, config.AgentJSONIntakeProtocol, config.DefaultIntakeOrigin); err == nil { httpConnectivity = http.CheckConnectivity(endpoints.Main, coreConfig) diff --git a/comp/logs/agent/agentimpl/agent_serverless_init.go b/comp/logs/agent/agentimpl/agent_serverless_init.go index aff03ee85d561..31dbf3e41d2dc 100644 --- a/comp/logs/agent/agentimpl/agent_serverless_init.go +++ b/comp/logs/agent/agentimpl/agent_serverless_init.go @@ -13,8 +13,8 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/logs/agent/config" integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/client" "github.com/DataDog/datadog-agent/pkg/logs/diagnostic" @@ -75,7 +75,7 @@ func (a *logAgent) SetupPipeline( } // buildEndpoints builds endpoints for the logs agent -func buildEndpoints(coreConfig pkgConfig.Reader) (*config.Endpoints, error) { +func buildEndpoints(coreConfig model.Reader) (*config.Endpoints, error) { config, err := config.BuildServerlessEndpoints(coreConfig, intakeTrackType, config.DefaultIntakeProtocol) if err != nil { return nil, err diff --git a/comp/logs/agent/agentimpl/agent_test.go b/comp/logs/agent/agentimpl/agent_test.go index d130981ae7920..c5cefb6be5113 100644 --- a/comp/logs/agent/agentimpl/agent_test.go +++ b/comp/logs/agent/agentimpl/agent_test.go @@ -36,8 +36,8 @@ import ( flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent/inventoryagentimpl" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/client/http" "github.com/DataDog/datadog-agent/pkg/logs/client/mock" "github.com/DataDog/datadog-agent/pkg/logs/client/tcp" @@ -149,7 +149,7 @@ func 
createAgent(suite *AgentTestSuite, endpoints *config.Endpoints) (*logAgent, } func (suite *AgentTestSuite) testAgent(endpoints *config.Endpoints) { - coreConfig.SetFeatures(suite.T(), env.Docker, env.Kubernetes) + env.SetFeatures(suite.T(), env.Docker, env.Kubernetes) agent, sources, _ := createAgent(suite, endpoints) @@ -185,7 +185,7 @@ func (suite *AgentTestSuite) TestAgentTcp() { } func (suite *AgentTestSuite) TestAgentHttp() { - server := http.NewTestServer(200, coreConfig.Datadog()) + server := http.NewTestServer(200, pkgconfigsetup.Datadog()) defer server.Stop() endpoints := config.NewEndpoints(server.Endpoint, nil, false, true) @@ -196,7 +196,7 @@ func (suite *AgentTestSuite) TestAgentStopsWithWrongBackendTcp() { endpoint := config.NewEndpoint("", "fake:", 0, false) endpoints := config.NewEndpoints(endpoint, []config.Endpoint{}, true, false) - coreConfig.SetFeatures(suite.T(), env.Docker, env.Kubernetes) + env.SetFeatures(suite.T(), env.Docker, env.Kubernetes) agent, sources, _ := createAgent(suite, endpoints) diff --git a/comp/logs/agent/agentimpl/serverless.go b/comp/logs/agent/agentimpl/serverless.go index 22e06ae5a8beb..3e280f8e2921c 100644 --- a/comp/logs/agent/agentimpl/serverless.go +++ b/comp/logs/agent/agentimpl/serverless.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/logs/agent" flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/service" "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/logs/tailers" @@ -24,7 +24,7 @@ import ( func NewServerlessLogsAgent(tagger tagger.Component) agent.ServerlessLogsAgent { logsAgent := &logAgent{ log: logComponent.NewTemporaryLoggerWithoutInit(), - config: pkgConfig.Datadog(), + config: pkgconfigsetup.Datadog(), started: atomic.NewUint32(0), sources: sources.NewLogSources(), diff --git a/comp/logs/agent/config/config.go b/comp/logs/agent/config/config.go index 5c188e2139168..4a383358329a1 100644 --- a/comp/logs/agent/config/config.go +++ b/comp/logs/agent/config/config.go @@ -15,6 +15,7 @@ import ( "time" pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + "github.com/DataDog/datadog-agent/pkg/config/structure" pkgconfigutils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -70,7 +71,7 @@ func GlobalProcessingRules(coreConfig pkgconfigmodel.Reader) ([]*ProcessingRule, if s, ok := raw.(string); ok && s != "" { err = json.Unmarshal([]byte(s), &rules) } else { - err = coreConfig.UnmarshalKey("logs_config.processing_rules", &rules) + err = structure.UnmarshalKey(coreConfig, "logs_config.processing_rules", &rules, structure.ConvertEmptyStringToNil) } if err != nil { return nil, err diff --git a/comp/logs/agent/config/config_keys.go b/comp/logs/agent/config/config_keys.go index df5ee1ad4abe6..0f9dbbf2b36de 100644 --- a/comp/logs/agent/config/config_keys.go +++ b/comp/logs/agent/config/config_keys.go @@ -11,6 +11,7 @@ import ( pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/config/structure" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -152,7 +153,7 @@ func (l *LogsConfigKeys) getAdditionalEndpoints() []unmarshalEndpoint { if s, ok := 
raw.(string); ok && s != "" { err = json.Unmarshal([]byte(s), &endpoints) } else { - err = l.getConfig().UnmarshalKey(configKey, &endpoints) + err = structure.UnmarshalKey(l.getConfig(), configKey, &endpoints, structure.EnableSquash) } if err != nil { log.Warnf("Could not parse additional_endpoints for logs: %v", err) diff --git a/comp/logs/agent/config/go.mod b/comp/logs/agent/config/go.mod index ee7c4157195db..8fa2037f67e06 100644 --- a/comp/logs/agent/config/go.mod +++ b/comp/logs/agent/config/go.mod @@ -15,7 +15,10 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../../../pkg/config/env/ github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../../pkg/config/model/ + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../../pkg/config/structure + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/telemetry => ../../../../pkg/telemetry github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../pkg/util/executable @@ -35,12 +38,13 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/core/config v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 github.com/DataDog/viper v1.13.5 github.com/stretchr/testify v1.9.0 go.uber.org/fx v1.22.2 @@ -49,19 +53,21 @@ require ( require ( github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect - 
github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect @@ -81,7 +87,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect @@ -93,12 +99,12 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/logs/agent/config/go.sum b/comp/logs/agent/config/go.sum index c0f06ba5f32fa..b8178bc54e8ad 100644 --- a/comp/logs/agent/config/go.sum +++ b/comp/logs/agent/config/go.sum @@ -182,8 +182,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -255,15 +256,15 @@ golang.org/x/crypto 
v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -299,8 +300,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -316,8 +317,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools 
v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/comp/metadata/host/hostimpl/hosttags/tags.go b/comp/metadata/host/hostimpl/hosttags/tags.go index 9215071ea4b33..606a874bf7fd3 100644 --- a/comp/metadata/host/hostimpl/hosttags/tags.go +++ b/comp/metadata/host/hostimpl/hosttags/tags.go @@ -12,8 +12,8 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + "github.com/DataDog/datadog-agent/pkg/config/model" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/cache" @@ -44,7 +44,7 @@ type providerDef struct { getTags func(context.Context) ([]string, error) } -func getProvidersDefinitions(conf config.Reader) map[string]*providerDef { +func getProvidersDefinitions(conf model.Reader) map[string]*providerDef { providers := make(map[string]*providerDef) if conf.GetBool("collect_gce_tags") { @@ -100,7 +100,7 @@ func appendAndSplitTags(target []string, tags []string, splits map[string]string // - First one controlled by `cached` boolean, used for performances (cache all tags) // - Second one per provider, to avoid missing host tags for 30 minutes when a component fails (for instance, Cluster Agent). // This second layer is always on. -func Get(ctx context.Context, cached bool, conf config.Reader) *Tags { +func Get(ctx context.Context, cached bool, conf model.Reader) *Tags { if cached { if x, found := cache.Cache.Get(tagsCacheKey); found { tags := x.(*Tags) diff --git a/comp/metadata/host/hostimpl/hosttags/tags_test.go b/comp/metadata/host/hostimpl/hosttags/tags_test.go index ebd42ed745b1b..64460410ac143 100644 --- a/comp/metadata/host/hostimpl/hosttags/tags_test.go +++ b/comp/metadata/host/hostimpl/hosttags/tags_test.go @@ -15,9 +15,8 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" - model "github.com/DataDog/datadog-agent/pkg/config/model" + "github.com/DataDog/datadog-agent/pkg/config/model" ) func setupTest(t *testing.T) (model.Config, context.Context) { @@ -111,7 +110,7 @@ func TestHostTagsCache(t *testing.T) { var fooErr error nbCall := 0 - getProvidersDefinitionsFunc = func(config.Reader) map[string]*providerDef { + getProvidersDefinitionsFunc = func(model.Reader) map[string]*providerDef { return map[string]*providerDef{ "foo": { retries: 2, diff --git a/comp/metadata/host/hostimpl/utils/common.go b/comp/metadata/host/hostimpl/utils/common.go index b4e4c5ef6a992..08e5888a99359 100644 --- a/comp/metadata/host/hostimpl/utils/common.go +++ b/comp/metadata/host/hostimpl/utils/common.go @@ -6,7 +6,7 @@ package utils import ( - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/uuid" "github.com/DataDog/datadog-agent/pkg/version" @@ -21,7 +21,7 @@ type CommonPayload struct { } // GetCommonPayload fills and return the common metadata payload -func GetCommonPayload(hostname string, conf config.Reader) *CommonPayload { +func 
GetCommonPayload(hostname string, conf model.Reader) *CommonPayload { return &CommonPayload{ // olivier: I _think_ `APIKey` is only a legacy field, and // is not actually used by the backend diff --git a/comp/metadata/host/hostimpl/utils/host.go b/comp/metadata/host/hostimpl/utils/host.go index 5646dfbd44287..72ed11ad307cc 100644 --- a/comp/metadata/host/hostimpl/utils/host.go +++ b/comp/metadata/host/hostimpl/utils/host.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/hosttags" "github.com/DataDog/datadog-agent/comp/otelcol/otlp" "github.com/DataDog/datadog-agent/pkg/collector/python" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/logs/status" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders" @@ -118,14 +118,14 @@ func getNetworkMeta(ctx context.Context) *NetworkMeta { return networkMeta } -func getLogsMeta(conf config.Reader) *LogsMeta { +func getLogsMeta(conf model.Reader) *LogsMeta { return &LogsMeta{ Transport: string(status.GetCurrentTransport()), AutoMultilineEnabled: conf.GetBool("logs_config.auto_multi_line_detection"), } } -func getInstallMethod(conf config.Reader) *InstallMethod { +func getInstallMethod(conf model.Reader) *InstallMethod { install, err := installinfoGet(conf) if err != nil { return &InstallMethod{ @@ -145,7 +145,7 @@ func getInstallMethod(conf config.Reader) *InstallMethod { // metadata payload. The NoProxy maps contain any errors or warnings due to the behavior changing when // no_proxy_nonexact_match is enabled. ProxyBehaviorChanged is true in the metadata if there would be any errors or // warnings indicating that there would a behavior change if 'no_proxy_nonexact_match' was enabled. -func getProxyMeta(conf config.Reader) *ProxyMeta { +func getProxyMeta(conf model.Reader) *ProxyMeta { NoProxyNonexactMatchExplicitlySetState := false NoProxyNonexactMatch := false if conf.IsSet("no_proxy_nonexact_match") { @@ -168,7 +168,7 @@ func GetOSVersion() string { // GetPayload builds a metadata payload every time is called. // Some data is collected only once, some is cached, some is collected at every call. -func GetPayload(ctx context.Context, conf config.Reader) *Payload { +func GetPayload(ctx context.Context, conf model.Reader) *Payload { hostnameData, err := hostname.GetWithProvider(ctx) if err != nil { log.Errorf("Error grabbing hostname for status: %v", err) @@ -200,7 +200,7 @@ func GetPayload(ctx context.Context, conf config.Reader) *Payload { // GetFromCache returns the payload from the cache if it exists, otherwise it creates it. // The metadata reporting should always grab it fresh. Any other uses, e.g. 
status, should use this -func GetFromCache(ctx context.Context, conf config.Reader) *Payload { +func GetFromCache(ctx context.Context, conf model.Reader) *Payload { data, found := cache.Cache.Get(hostCacheKey) if !found { return GetPayload(ctx, conf) diff --git a/comp/metadata/host/hostimpl/utils/host_test.go b/comp/metadata/host/hostimpl/utils/host_test.go index 6e8da546812fb..8a7d5294ae0d2 100644 --- a/comp/metadata/host/hostimpl/utils/host_test.go +++ b/comp/metadata/host/hostimpl/utils/host_test.go @@ -15,8 +15,8 @@ import ( "github.com/stretchr/testify/require" "github.com/DataDog/datadog-agent/pkg/collector/python" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/logs/status" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders" @@ -31,13 +31,13 @@ func TestOTLPEnabled(t *testing.T) { ctx := context.Background() conf := configmock.New(t) - defer func(orig func(cfg config.Reader) bool) { otlpIsEnabled = orig }(otlpIsEnabled) + defer func(orig func(cfg model.Reader) bool) { otlpIsEnabled = orig }(otlpIsEnabled) - otlpIsEnabled = func(config.Reader) bool { return false } + otlpIsEnabled = func(model.Reader) bool { return false } p := GetPayload(ctx, conf) assert.False(t, p.OtlpMeta.Enabled) - otlpIsEnabled = func(config.Reader) bool { return true } + otlpIsEnabled = func(model.Reader) bool { return true } p = GetPayload(ctx, conf) assert.True(t, p.OtlpMeta.Enabled) } @@ -69,18 +69,18 @@ func TestGetLogsMeta(t *testing.T) { func TestGetInstallMethod(t *testing.T) { conf := configmock.New(t) - defer func(orig func(conf config.Reader) (*installinfo.InstallInfo, error)) { + defer func(orig func(conf model.Reader) (*installinfo.InstallInfo, error)) { installinfoGet = orig }(installinfoGet) - installinfoGet = func(config.Reader) (*installinfo.InstallInfo, error) { return nil, fmt.Errorf("an error") } + installinfoGet = func(model.Reader) (*installinfo.InstallInfo, error) { return nil, fmt.Errorf("an error") } installMethod := getInstallMethod(conf) assert.Equal(t, "undefined", installMethod.ToolVersion) assert.Nil(t, installMethod.Tool) assert.Nil(t, installMethod.InstallerVersion) - installinfoGet = func(config.Reader) (*installinfo.InstallInfo, error) { + installinfoGet = func(model.Reader) (*installinfo.InstallInfo, error) { return &installinfo.InstallInfo{ ToolVersion: "chef-15", Tool: "chef", diff --git a/comp/metadata/host/hostimpl/utils/meta.go b/comp/metadata/host/hostimpl/utils/meta.go index a395a82f02824..bbd5f2b479cf9 100644 --- a/comp/metadata/host/hostimpl/utils/meta.go +++ b/comp/metadata/host/hostimpl/utils/meta.go @@ -10,7 +10,7 @@ import ( "os" "time" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders" @@ -38,7 +38,7 @@ type Meta struct { // GetMetaFromCache returns the metadata information about the host from the cache and returns it, if the cache is // empty, then it queries the information directly -func GetMetaFromCache(ctx context.Context, conf config.Reader) *Meta { +func GetMetaFromCache(ctx context.Context, conf model.Reader) *Meta { res, _ := cache.Get[*Meta]( metaCacheKey, func() (*Meta, error) { @@ -49,7 +49,7 @@ func GetMetaFromCache(ctx context.Context, conf 
config.Reader) *Meta { } // GetMeta returns the metadata information about the host and refreshes the cache -func GetMeta(ctx context.Context, conf config.Reader) *Meta { +func GetMeta(ctx context.Context, conf model.Reader) *Meta { osHostname, _ := os.Hostname() tzname, _ := time.Now().Zone() ec2Hostname, _ := ec2.GetHostname(ctx) diff --git a/comp/metadata/internal/util/inventory_enabled.go b/comp/metadata/internal/util/inventory_enabled.go index ee4ebbda3c0df..aa351bd780881 100644 --- a/comp/metadata/internal/util/inventory_enabled.go +++ b/comp/metadata/internal/util/inventory_enabled.go @@ -6,13 +6,13 @@ package util import ( - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" ) // InventoryEnabled returs true if 'enable_metadata_collection' and 'inventories_enabled' are set to true in the // configuration. -func InventoryEnabled(conf config.Reader) bool { +func InventoryEnabled(conf model.Reader) bool { if !conf.GetBool("enable_metadata_collection") { log.Debug("Metadata collection disabled: inventories payload will not be collected nor sent") return false diff --git a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent.go b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent.go index c1ecb0a63d55b..72e94be45a615 100644 --- a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent.go +++ b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent.go @@ -32,6 +32,7 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/runner/runnerimpl" "github.com/DataDog/datadog-agent/pkg/config/env" configFetcher "github.com/DataDog/datadog-agent/pkg/config/fetcher" + sysprobeConfigFetcher "github.com/DataDog/datadog-agent/pkg/config/fetcher/sysprobe" "github.com/DataDog/datadog-agent/pkg/config/model" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/serializer" @@ -60,7 +61,7 @@ var ( fetchSecurityConfig = configFetcher.SecurityAgentConfig fetchProcessConfig = func(cfg model.Reader) (string, error) { return configFetcher.ProcessAgentConfig(cfg, true) } fetchTraceConfig = configFetcher.TraceAgentConfig - fetchSystemProbeConfig = configFetcher.SystemProbeConfig + fetchSystemProbeConfig = sysprobeConfigFetcher.SystemProbeConfig ) type agentMetadata map[string]interface{} diff --git a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go index 564f38ea35b1f..3a3b279ae777a 100644 --- a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go +++ b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go @@ -24,8 +24,8 @@ import ( logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig" "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" configFetcher "github.com/DataDog/datadog-agent/pkg/config/fetcher" + sysprobeConfigFetcher "github.com/DataDog/datadog-agent/pkg/config/fetcher/sysprobe" pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/serializer" @@ -274,7 +274,7 @@ func TestConfigRefresh(t *testing.T) { ia := getTestInventoryPayload(t, nil, nil) assert.False(t, ia.RefreshTriggered()) - 
pkgconfig.Datadog().Set("inventories_max_interval", 10*60, pkgconfigmodel.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("inventories_max_interval", 10*60, pkgconfigmodel.SourceAgentRuntime) assert.True(t, ia.RefreshTriggered()) } @@ -457,7 +457,7 @@ func TestFetchSystemProbeAgent(t *testing.T) { } defer func() { - fetchSystemProbeConfig = configFetcher.SystemProbeConfig + fetchSystemProbeConfig = sysprobeConfigFetcher.SystemProbeConfig }() fetchSystemProbeConfig = func(config pkgconfigmodel.Reader) (string, error) { // test that the system-probe config was passed and not the agent config diff --git a/comp/metadata/inventoryotel/inventoryotelimpl/inventoryotel_test.go b/comp/metadata/inventoryotel/inventoryotelimpl/inventoryotel_test.go index 4617b590bc214..1ce722fc511a1 100644 --- a/comp/metadata/inventoryotel/inventoryotelimpl/inventoryotel_test.go +++ b/comp/metadata/inventoryotel/inventoryotelimpl/inventoryotel_test.go @@ -20,8 +20,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/serializer" serializermock "github.com/DataDog/datadog-agent/pkg/serializer/mocks" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -101,7 +101,7 @@ func TestConfigRefresh(t *testing.T) { io := getTestInventoryPayload(t, nil) assert.False(t, io.RefreshTriggered()) - pkgconfig.Datadog().Set("inventories_max_interval", 10*60, pkgconfigmodel.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("inventories_max_interval", 10*60, pkgconfigmodel.SourceAgentRuntime) assert.True(t, io.RefreshTriggered()) } diff --git a/comp/metadata/systemprobe/impl/system_probe.go b/comp/metadata/systemprobe/impl/system_probe.go index fdead081c4695..04b33cc38d687 100644 --- a/comp/metadata/systemprobe/impl/system_probe.go +++ b/comp/metadata/systemprobe/impl/system_probe.go @@ -24,7 +24,7 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/internal/util" "github.com/DataDog/datadog-agent/comp/metadata/runner/runnerimpl" systemprobemetadata "github.com/DataDog/datadog-agent/comp/metadata/systemprobe/def" - configFetcher "github.com/DataDog/datadog-agent/pkg/config/fetcher" + configFetcher "github.com/DataDog/datadog-agent/pkg/config/fetcher/sysprobe" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/serializer/marshaler" diff --git a/comp/metadata/systemprobe/impl/system_probe_test.go b/comp/metadata/systemprobe/impl/system_probe_test.go index edc18b5e79dbf..40bad07c0a505 100644 --- a/comp/metadata/systemprobe/impl/system_probe_test.go +++ b/comp/metadata/systemprobe/impl/system_probe_test.go @@ -24,7 +24,7 @@ import ( logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig" "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl" - configFetcher "github.com/DataDog/datadog-agent/pkg/config/fetcher" + configFetcher "github.com/DataDog/datadog-agent/pkg/config/fetcher/sysprobe" "github.com/DataDog/datadog-agent/pkg/config/model" serializermock "github.com/DataDog/datadog-agent/pkg/serializer/mocks" "github.com/DataDog/datadog-agent/pkg/util/fxutil" diff --git 
a/comp/netflow/config/config.go b/comp/netflow/config/config.go index ed5db3eb41bbc..1854ed39dfc1a 100644 --- a/comp/netflow/config/config.go +++ b/comp/netflow/config/config.go @@ -12,6 +12,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" + "github.com/DataDog/datadog-agent/pkg/config/structure" "github.com/DataDog/datadog-agent/pkg/snmp/utils" "github.com/DataDog/datadog-agent/comp/netflow/common" @@ -59,7 +60,7 @@ type Mapping struct { func ReadConfig(conf config.Component, logger log.Component) (*NetflowConfig, error) { var mainConfig NetflowConfig - err := conf.UnmarshalKey("network_devices.netflow", &mainConfig) + err := structure.UnmarshalKey(conf, "network_devices.netflow", &mainConfig) if err != nil { return nil, err } diff --git a/comp/netflow/config/config_test.go b/comp/netflow/config/config_test.go index 89f232a601a0b..4fd6a9331dbda 100644 --- a/comp/netflow/config/config_test.go +++ b/comp/netflow/config/config_test.go @@ -14,7 +14,7 @@ import ( logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" "github.com/DataDog/datadog-agent/comp/netflow/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestReadConfig(t *testing.T) { @@ -216,11 +216,11 @@ network_devices: } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(tt.configYaml)) + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(tt.configYaml)) require.NoError(t, err) - readConfig, err := ReadConfig(config.Datadog(), logger) + readConfig, err := ReadConfig(pkgconfigsetup.Datadog(), logger) if tt.expectedError != "" { assert.ErrorContains(t, err, tt.expectedError) assert.Nil(t, readConfig) diff --git a/comp/netflow/flowaggregator/flowaccumulator_test.go b/comp/netflow/flowaggregator/flowaccumulator_test.go index 8e51b56ac1f82..1707ba14a9ed9 100644 --- a/comp/netflow/flowaggregator/flowaccumulator_test.go +++ b/comp/netflow/flowaggregator/flowaccumulator_test.go @@ -306,3 +306,77 @@ func Test_flowAccumulator_flush(t *testing.T) { _, ok = acc.flows[flow.AggregationHash()] assert.False(t, ok) } + +func Test_flowAccumulator_detectHashCollision(t *testing.T) { + logger := logmock.New(t) + rdnsQuerier := fxutil.Test[rdnsquerier.Component](t, rdnsquerierfxmock.MockModule()) + synFlag := uint32(2) + timeNow = MockTimeNow + flushInterval := 60 * time.Second + flowContextTTL := 60 * time.Second + + // Given + flowA1 := &common.Flow{ + FlowType: common.TypeNetFlow9, + ExporterAddr: []byte{127, 0, 0, 1}, + StartTimestamp: 1234568, + EndTimestamp: 1234569, + Bytes: 20, + Packets: 4, + SrcAddr: []byte{10, 10, 10, 10}, + DstAddr: []byte{10, 10, 10, 20}, + IPProtocol: uint32(6), + SrcPort: 1000, + DstPort: 80, + TCPFlags: synFlag, + } + flowA2 := &common.Flow{ + FlowType: common.TypeNetFlow9, + ExporterAddr: []byte{127, 0, 0, 1}, + StartTimestamp: 1234568, + EndTimestamp: 1234569, + Bytes: 20, + Packets: 4, + SrcAddr: []byte{10, 10, 10, 10}, + DstAddr: []byte{10, 10, 10, 20}, + IPProtocol: uint32(6), + SrcPort: 1000, + DstPort: 80, + TCPFlags: synFlag, + } + flowB1 := &common.Flow{ + FlowType: common.TypeNetFlow9, + ExporterAddr: []byte{127, 0, 0, 1}, + StartTimestamp: 1234568, + EndTimestamp: 1234569, + Bytes: 100, + Packets: 10, + SrcAddr: []byte{10, 10, 10, 10}, + DstAddr: []byte{10, 10, 10, 30}, + IPProtocol: 
uint32(6), + SrcPort: 80, + DstPort: 2001, + } + + // When + acc := newFlowAccumulator(flushInterval, flowContextTTL, common.DefaultAggregatorPortRollupThreshold, false, logger, rdnsQuerier) + + // Then + assert.Equal(t, uint64(0), acc.hashCollisionFlowCount.Load()) + + // test valid hash collision (same flow object) does not increment flow count + aggHash1 := flowA1.AggregationHash() + acc.detectHashCollision(aggHash1, *flowA1, *flowA1) + assert.Equal(t, uint64(0), acc.hashCollisionFlowCount.Load()) + + // test valid hash collision (same data, new flow object) does not increment flow count + // Note: not a realistic use case as hashes will be different, but testing for completeness + aggHash2 := flowA2.AggregationHash() + acc.detectHashCollision(aggHash2, *flowA1, *flowA2) + assert.Equal(t, uint64(0), acc.hashCollisionFlowCount.Load()) + + // test invalid hash collision (different flow context, same hash) increments flow count + aggHash3 := flowB1.AggregationHash() + acc.detectHashCollision(aggHash3, *flowA1, *flowB1) + assert.Equal(t, uint64(1), acc.hashCollisionFlowCount.Load()) +} diff --git a/comp/networkpath/npcollector/npcollectorimpl/npcollector.go b/comp/networkpath/npcollector/npcollectorimpl/npcollector.go index f65bce267ee70..b1f1e089ef8be 100644 --- a/comp/networkpath/npcollector/npcollectorimpl/npcollector.go +++ b/comp/networkpath/npcollector/npcollectorimpl/npcollector.go @@ -120,8 +120,13 @@ func (s *npCollectorImpl) ScheduleConns(conns []*model.Connection) { startTime := s.TimeNowFn() for _, conn := range conns { remoteAddr := conn.Raddr - remotePort := uint16(conn.Raddr.GetPort()) protocol := convertProtocol(conn.GetType()) + var remotePort uint16 + // UDP traces should not be done to the active + // port + if protocol != payload.ProtocolUDP { + remotePort = uint16(conn.Raddr.GetPort()) + } if !shouldScheduleNetworkPathForConn(conn) { s.logger.Tracef("Skipped connection: addr=%s, port=%d, protocol=%s", remoteAddr, remotePort, protocol) continue diff --git a/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go b/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go index 9fe8542ab59d2..54fef6f329523 100644 --- a/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go +++ b/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go @@ -133,6 +133,7 @@ func Test_NpCollector_runningAndProcessing(t *testing.T) { event1 := []byte(` { "timestamp": 0, + "agent_version": "", "namespace": "my-ns1", "pathtrace_id": "pathtrace-id-111", "origin":"network_traffic", @@ -166,6 +167,7 @@ func Test_NpCollector_runningAndProcessing(t *testing.T) { event2 := []byte(` { "timestamp": 0, + "agent_version": "", "namespace": "my-ns1", "pathtrace_id": "pathtrace-id-222", "origin":"network_traffic", @@ -297,9 +299,9 @@ func Test_newNpCollectorImpl_defaultConfigs(t *testing.T) { assert.Equal(t, true, npCollector.collectorConfigs.networkPathCollectorEnabled()) assert.Equal(t, 4, npCollector.workers) - assert.Equal(t, 1000, cap(npCollector.pathtestInputChan)) - assert.Equal(t, 1000, cap(npCollector.pathtestProcessingChan)) - assert.Equal(t, 10000, npCollector.collectorConfigs.pathtestContextsLimit) + assert.Equal(t, 100000, cap(npCollector.pathtestInputChan)) + assert.Equal(t, 100000, cap(npCollector.pathtestProcessingChan)) + assert.Equal(t, 100000, npCollector.collectorConfigs.pathtestContextsLimit) assert.Equal(t, "default", npCollector.networkDevicesNamespace) } @@ -372,7 +374,7 @@ func Test_npCollectorImpl_ScheduleConns(t *testing.T) { }, }, 
expectedPathtests: []*common.Pathtest{ - {Hostname: "10.0.0.6", Port: uint16(161), Protocol: payload.ProtocolUDP, SourceContainerID: "testId1"}, + {Hostname: "10.0.0.6", Port: uint16(0), Protocol: payload.ProtocolUDP, SourceContainerID: "testId1"}, }, }, { diff --git a/comp/otelcol/collector-contrib/impl/components.go b/comp/otelcol/collector-contrib/impl/components.go index 28ac885df9fa7..9f27554af7d37 100644 --- a/comp/otelcol/collector-contrib/impl/components.go +++ b/comp/otelcol/collector-contrib/impl/components.go @@ -38,7 +38,6 @@ import ( "go.opentelemetry.io/collector/connector" "go.opentelemetry.io/collector/exporter" debugexporter "go.opentelemetry.io/collector/exporter/debugexporter" - loggingexporter "go.opentelemetry.io/collector/exporter/loggingexporter" nopexporter "go.opentelemetry.io/collector/exporter/nopexporter" otlpexporter "go.opentelemetry.io/collector/exporter/otlpexporter" otlphttpexporter "go.opentelemetry.io/collector/exporter/otlphttpexporter" @@ -88,7 +87,6 @@ func components() (otelcol.Factories, error) { factories.Exporters, err = exporter.MakeFactoryMap( debugexporter.NewFactory(), - loggingexporter.NewFactory(), nopexporter.NewFactory(), otlpexporter.NewFactory(), otlphttpexporter.NewFactory(), diff --git a/comp/otelcol/collector-contrib/impl/go.mod b/comp/otelcol/collector-contrib/impl/go.mod index 96fb8ef3d6823..6342ac0e8bca0 100644 --- a/comp/otelcol/collector-contrib/impl/go.mod +++ b/comp/otelcol/collector-contrib/impl/go.mod @@ -38,7 +38,6 @@ require ( go.opentelemetry.io/collector/connector v0.104.0 go.opentelemetry.io/collector/exporter v0.104.0 go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 - go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 go.opentelemetry.io/collector/exporter/nopexporter v0.104.0 go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 diff --git a/comp/otelcol/collector-contrib/impl/go.sum b/comp/otelcol/collector-contrib/impl/go.sum index b4814e7aa6f0c..9910d38ca9f68 100644 --- a/comp/otelcol/collector-contrib/impl/go.sum +++ b/comp/otelcol/collector-contrib/impl/go.sum @@ -928,8 +928,6 @@ go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBL go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ= go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 h1:1Z63H/xxv6IzMP7GPmI6v/lQAqZwYZCVC0rWYcYOomw= go.opentelemetry.io/collector/exporter/debugexporter v0.104.0/go.mod h1:NHVzTM0Z/bomgR7SAe3ysx4CZzh2UJ3TXWSCnaOB1Wo= -go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 h1:MaBTuHmK/HAQ+/rLTrGf3tazKum8Sic3/CaXgNr5xnc= -go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0/go.mod h1:sXZhACvds6z71cf2fzKrojMgdJItJZxeClKlF/PI/l8= go.opentelemetry.io/collector/exporter/nopexporter v0.104.0 h1:33JeCQiJbvhSXFqQ34R4ole/wD4iHtF5LYp2GziYVnY= go.opentelemetry.io/collector/exporter/nopexporter v0.104.0/go.mod h1:73afhI8uc5NKAl9pMJlgQQ46Ck9e7nQ2zZGXHHSzuwo= go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 h1:EFOdhnc2yGhqou0Tud1HsM7fsgWo/H3tdQhYYytDprQ= diff --git a/comp/otelcol/collector-contrib/impl/manifest.yaml b/comp/otelcol/collector-contrib/impl/manifest.yaml index 092ea7cb47a3c..69f4208a1de98 100644 --- a/comp/otelcol/collector-contrib/impl/manifest.yaml +++ b/comp/otelcol/collector-contrib/impl/manifest.yaml @@ -18,7 +18,6 @@ extensions: exporters: - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 - - 
gomod: go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 - gomod: go.opentelemetry.io/collector/exporter/nopexporter v0.104.0 - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 - gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 diff --git a/comp/otelcol/collector/impl-pipeline/flare_filler.go b/comp/otelcol/collector/impl-pipeline/flare_filler.go index 109429fb4bb9d..97378486d1a22 100644 --- a/comp/otelcol/collector/impl-pipeline/flare_filler.go +++ b/comp/otelcol/collector/impl-pipeline/flare_filler.go @@ -9,19 +9,16 @@ package collectorimpl import ( - "context" - "crypto/tls" "encoding/json" - "errors" "fmt" "io" "net/http" "net/url" "strings" - "time" flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types" extension "github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def" + apiutil "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -130,35 +127,15 @@ func (c *collectorImpl) requestOtelConfigInfo(endpointURL string) ([]byte, error return []byte(overrideConfigResponse), nil } - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + options := apiutil.ReqOptions{ + Ctx: c.ctx, + Authtoken: c.authToken.Get(), } - client := &http.Client{Transport: tr} - timeoutSeconds := c.config.GetInt("otelcollector.extension_timeout") - if timeoutSeconds == 0 { - timeoutSeconds = defaultExtensionTimeout - } - - ctx := context.TODO() - ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) - defer cancel() - req, err := http.NewRequestWithContext(ctx, "GET", endpointURL, nil) + data, err := apiutil.DoGetWithOptions(c.client, endpointURL, &options) if err != nil { return nil, err } - res, err := client.Do(req) - if err != nil { - return nil, err - } - body, err := io.ReadAll(res.Body) - res.Body.Close() - if err != nil { - return nil, err - } - if res.StatusCode >= 400 { - return nil, errors.New(string(body)) - } - return body, nil + return data, nil } diff --git a/comp/otelcol/collector/impl-pipeline/pipeline.go b/comp/otelcol/collector/impl-pipeline/pipeline.go index 2771f70a9fe85..6b0fd740a7587 100644 --- a/comp/otelcol/collector/impl-pipeline/pipeline.go +++ b/comp/otelcol/collector/impl-pipeline/pipeline.go @@ -10,7 +10,10 @@ package collectorimpl import ( "context" + "net/http" + "time" + "github.com/DataDog/datadog-agent/comp/api/authtoken" "github.com/DataDog/datadog-agent/comp/core/config" flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types" log "github.com/DataDog/datadog-agent/comp/core/log/def" @@ -22,6 +25,7 @@ import ( "github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline" "github.com/DataDog/datadog-agent/comp/otelcol/otlp" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/datatype" + apiutil "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -44,6 +48,9 @@ type Requires struct { // Log specifies the logging component. Log log.Component + // Authtoken specifies the authentication token component. + Authtoken authtoken.Component + // Serializer specifies the metrics serializer that is used to export metrics // to Datadog. 
Serializer serializer.MetricSerializer @@ -67,6 +74,7 @@ type Provides struct { } type collectorImpl struct { + authToken authtoken.Component col *otlp.Pipeline config config.Component log log.Component @@ -74,6 +82,8 @@ type collectorImpl struct { logsAgent optional.Option[logsagentpipeline.Component] inventoryAgent inventoryagent.Component tagger tagger.Component + client *http.Client + ctx context.Context } func (c *collectorImpl) start(context.Context) error { @@ -96,11 +106,8 @@ func (c *collectorImpl) start(context.Context) error { return nil } c.col = col - // the context passed to this function has a startup deadline which - // will shutdown the collector prematurely - ctx := context.Background() go func() { - if err := col.Run(ctx); err != nil { + if err := col.Run(c.ctx); err != nil { c.log.Errorf("Error running the OTLP ingest pipeline: %v", err) } }() @@ -121,13 +128,23 @@ func (c *collectorImpl) Status() datatype.CollectorStatus { // NewComponent creates a new Component for this module and returns any errors on failure. func NewComponent(reqs Requires) (Provides, error) { + + timeoutSeconds := reqs.Config.GetInt("otelcollector.extension_timeout") + if timeoutSeconds == 0 { + timeoutSeconds = defaultExtensionTimeout + } + client := apiutil.GetClientWithTimeout(time.Duration(timeoutSeconds)*time.Second, false) + collector := &collectorImpl{ + authToken: reqs.Authtoken, config: reqs.Config, log: reqs.Log, serializer: reqs.Serializer, logsAgent: reqs.LogsAgent, inventoryAgent: reqs.InventoryAgent, tagger: reqs.Tagger, + client: client, + ctx: context.Background(), } reqs.Lc.Append(compdef.Hook{ diff --git a/comp/otelcol/collector/impl/collector.go b/comp/otelcol/collector/impl/collector.go index 87fcbae39ce63..9b1e050156330 100644 --- a/comp/otelcol/collector/impl/collector.go +++ b/comp/otelcol/collector/impl/collector.go @@ -10,6 +10,8 @@ package collectorimpl import ( "context" + "os" + "path/filepath" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" @@ -21,13 +23,17 @@ import ( "go.opentelemetry.io/collector/confmap/provider/yamlprovider" "go.opentelemetry.io/collector/otelcol" + "github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/tagger" + "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util" compdef "github.com/DataDog/datadog-agent/comp/def" collectorcontrib "github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def" collector "github.com/DataDog/datadog-agent/comp/otelcol/collector/def" - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/def" ddextension "github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl" "github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/datadogexporter" @@ -39,9 +45,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/serializer" zapAgent "github.com/DataDog/datadog-agent/pkg/util/log/zap" "github.com/DataDog/datadog-agent/pkg/util/optional" - "github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" ) type collectorImpl struct { @@ -59,7 +62,6 @@ type Requires struct { // Log specifies the logging component. 
Log log.Component Provider confmap.Converter - ConfigStore configstore.Component Config config.Component CollectorContrib collectorcontrib.Component Serializer serializer.MetricSerializer @@ -110,15 +112,19 @@ func newConfigProviderSettings(reqs Requires, enhanced bool) otelcol.ConfigProvi } } +func generateID(group, resource, namespace, name string) string { + return string(util.GenerateKubeMetadataEntityID(group, resource, namespace, name)) +} + func addFactories(reqs Requires, factories otelcol.Factories) { if v, ok := reqs.LogsAgent.Get(); ok { factories.Exporters[datadogexporter.Type] = datadogexporter.NewFactory(reqs.TraceAgent, reqs.Serializer, v, reqs.SourceProvider, reqs.StatsdClientWrapper) } else { factories.Exporters[datadogexporter.Type] = datadogexporter.NewFactory(reqs.TraceAgent, reqs.Serializer, nil, reqs.SourceProvider, reqs.StatsdClientWrapper) } - factories.Processors[infraattributesprocessor.Type] = infraattributesprocessor.NewFactory(reqs.Tagger) - factories.Extensions[ddextension.Type] = ddextension.NewFactory(reqs.ConfigStore) + factories.Processors[infraattributesprocessor.Type] = infraattributesprocessor.NewFactory(reqs.Tagger, generateID) factories.Connectors[component.MustNewType("datadog")] = datadogconnector.NewFactory() + factories.Extensions[ddextension.Type] = ddextension.NewFactory(&factories, newConfigProviderSettings(reqs, false)) } // NewComponent returns a new instance of the collector component. @@ -130,11 +136,6 @@ func NewComponent(reqs Requires) (Provides, error) { addFactories(reqs, factories) converterEnabled := reqs.Config.GetBool("otelcollector.converter.enabled") - err = reqs.ConfigStore.AddConfigs(newConfigProviderSettings(reqs, false), newConfigProviderSettings(reqs, converterEnabled), factories) - if err != nil { - return Provides{}, err - } - // Replace default core to use Agent logger options := []zap.Option{ zap.WrapCore(func(zapcore.Core) zapcore.Core { @@ -144,7 +145,7 @@ func NewComponent(reqs Requires) (Provides, error) { set := otelcol.CollectorSettings{ BuildInfo: component.BuildInfo{ Version: "v0.104.0", - Command: "otel-agent", + Command: filepath.Base(os.Args[0]), Description: "Datadog Agent OpenTelemetry Collector", }, LoggingOptions: options, diff --git a/comp/otelcol/collector/impl/collector_test.go b/comp/otelcol/collector/impl/collector_test.go deleted file mode 100644 index c7a2e2c1f50ff..0000000000000 --- a/comp/otelcol/collector/impl/collector_test.go +++ /dev/null @@ -1,196 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -//go:build otlp - -// Package collectorimpl provides the implementation of the collector component for OTel Agent -package collectorimpl - -import ( - "os" - "path/filepath" - "testing" - - compdef "github.com/DataDog/datadog-agent/comp/def" - collectorcontribimpl "github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/impl" - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl" - converter "github.com/DataDog/datadog-agent/comp/otelcol/converter/impl" - "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/confmap/confmaptest" - "gopkg.in/yaml.v3" -) - -type lifecycle struct{} - -func (*lifecycle) Append(compdef.Hook) {} - -func uriFromFile(filename string) []string { - return []string{filepath.Join("testdata", filename)} -} - -func yamlBytesToMap(bytesConfig []byte) (map[string]any, error) { - var configMap = map[string]interface{}{} - err := yaml.Unmarshal(bytesConfig, configMap) - if err != nil { - return nil, err - } - return configMap, nil -} - -func TestGetConfDump(t *testing.T) { - configstore, err := configstore.NewConfigStore() - assert.NoError(t, err) - - provider, err := converter.NewConverter(converter.Requires{}) - assert.NoError(t, err) - - conf := setup.Datadog() - - reqs := Requires{ - CollectorContrib: collectorcontribimpl.NewComponent(), - Config: conf, - URIs: uriFromFile("simple-dd/config.yaml"), - ConfigStore: configstore, - Lc: &lifecycle{}, - Provider: provider, - } - _, err = NewComponent(reqs) - assert.NoError(t, err) - - t.Run("provided-string", func(t *testing.T) { - actualString, _ := configstore.GetProvidedConfAsString() - actualStringMap, err := yamlBytesToMap([]byte(actualString)) - assert.NoError(t, err) - - expectedBytes, err := os.ReadFile(filepath.Join("testdata", "simple-dd", "config-provided-result.yaml")) - assert.NoError(t, err) - expectedMap, err := yamlBytesToMap(expectedBytes) - assert.NoError(t, err) - - assert.Equal(t, expectedMap, actualStringMap) - }) - - t.Run("provided-confmap", func(t *testing.T) { - actualConfmap, _ := configstore.GetProvidedConf() - // marshal to yaml and then to map to drop the types for comparison - bytesConf, err := yaml.Marshal(actualConfmap.ToStringMap()) - assert.NoError(t, err) - actualStringMap, err := yamlBytesToMap(bytesConf) - assert.NoError(t, err) - - expectedMap, err := confmaptest.LoadConf("testdata/simple-dd/config-provided-result.yaml") - expectedStringMap := expectedMap.ToStringMap() - assert.NoError(t, err) - - assert.Equal(t, expectedStringMap, actualStringMap) - }) - - t.Run("enhanced-string", func(t *testing.T) { - actualString, _ := configstore.GetEnhancedConfAsString() - actualStringMap, err := yamlBytesToMap([]byte(actualString)) - assert.NoError(t, err) - - expectedBytes, err := os.ReadFile(filepath.Join("testdata", "simple-dd", "config-enhanced-result.yaml")) - assert.NoError(t, err) - expectedMap, err := yamlBytesToMap(expectedBytes) - assert.NoError(t, err) - - assert.Equal(t, expectedMap, actualStringMap) - }) - - t.Run("enhance-confmap", func(t *testing.T) { - actualConfmap, _ := configstore.GetEnhancedConf() - // marshal to yaml and then to map to drop the types for comparison - bytesConf, err := yaml.Marshal(actualConfmap.ToStringMap()) - assert.NoError(t, err) - actualStringMap, err := yamlBytesToMap(bytesConf) - assert.NoError(t, err) - - expectedMap, err := confmaptest.LoadConf("testdata/simple-dd/config-enhanced-result.yaml") - expectedStringMap := expectedMap.ToStringMap() - 
assert.NoError(t, err) - - assert.Equal(t, expectedStringMap, actualStringMap) - }) -} - -func TestGetConfDumpConverterDisabled(t *testing.T) { - configstore, err := configstore.NewConfigStore() - assert.NoError(t, err) - - provider, err := converter.NewConverter(converter.Requires{}) - assert.NoError(t, err) - - conf := setup.Datadog() - conf.SetWithoutSource("otelcollector.converter.enabled", false) - - reqs := Requires{ - CollectorContrib: collectorcontribimpl.NewComponent(), - Config: conf, - URIs: uriFromFile("simple-dd/config.yaml"), - ConfigStore: configstore, - Lc: &lifecycle{}, - Provider: provider, - } - _, err = NewComponent(reqs) - assert.NoError(t, err) - - t.Run("provided-string", func(t *testing.T) { - actualString, _ := configstore.GetProvidedConfAsString() - actualStringMap, err := yamlBytesToMap([]byte(actualString)) - assert.NoError(t, err) - - expectedBytes, err := os.ReadFile(filepath.Join("testdata", "simple-dd", "config-provided-result.yaml")) - assert.NoError(t, err) - expectedMap, err := yamlBytesToMap(expectedBytes) - assert.NoError(t, err) - - assert.Equal(t, expectedMap, actualStringMap) - }) - - t.Run("provided-confmap", func(t *testing.T) { - actualConfmap, _ := configstore.GetProvidedConf() - // marshal to yaml and then to map to drop the types for comparison - bytesConf, err := yaml.Marshal(actualConfmap.ToStringMap()) - assert.NoError(t, err) - actualStringMap, err := yamlBytesToMap(bytesConf) - assert.NoError(t, err) - - expectedMap, err := confmaptest.LoadConf("testdata/simple-dd/config-provided-result.yaml") - expectedStringMap := expectedMap.ToStringMap() - assert.NoError(t, err) - - assert.Equal(t, expectedStringMap, actualStringMap) - }) - - t.Run("enhanced-string", func(t *testing.T) { - actualString, _ := configstore.GetEnhancedConfAsString() - actualStringMap, err := yamlBytesToMap([]byte(actualString)) - assert.NoError(t, err) - - expectedBytes, err := os.ReadFile(filepath.Join("testdata", "simple-dd", "config-provided-result.yaml")) - assert.NoError(t, err) - expectedMap, err := yamlBytesToMap(expectedBytes) - assert.NoError(t, err) - - assert.Equal(t, expectedMap, actualStringMap) - }) - - t.Run("enhance-confmap", func(t *testing.T) { - actualConfmap, _ := configstore.GetEnhancedConf() - // marshal to yaml and then to map to drop the types for comparison - bytesConf, err := yaml.Marshal(actualConfmap.ToStringMap()) - assert.NoError(t, err) - actualStringMap, err := yamlBytesToMap(bytesConf) - assert.NoError(t, err) - - expectedMap, err := confmaptest.LoadConf("testdata/simple-dd/config-provided-result.yaml") - expectedStringMap := expectedMap.ToStringMap() - assert.NoError(t, err) - - assert.Equal(t, expectedStringMap, actualStringMap) - }) -} diff --git a/comp/otelcol/configstore/def/component.go b/comp/otelcol/configstore/def/component.go deleted file mode 100644 index 86943670052e4..0000000000000 --- a/comp/otelcol/configstore/def/component.go +++ /dev/null @@ -1,23 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2024-present Datadog, Inc. - -// Package configstore defines the otel agent configstore component. -package configstore - -import ( - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/otelcol" -) - -// team: opentelemetry - -// Component provides functions to store and expose the provided and enhanced configs. 
-type Component interface { - AddConfigs(otelcol.ConfigProviderSettings, otelcol.ConfigProviderSettings, otelcol.Factories) error - GetProvidedConf() (*confmap.Conf, error) - GetEnhancedConf() (*confmap.Conf, error) - GetProvidedConfAsString() (string, error) - GetEnhancedConfAsString() (string, error) -} diff --git a/comp/otelcol/configstore/def/go.mod b/comp/otelcol/configstore/def/go.mod deleted file mode 100644 index b8b63f40b0278..0000000000000 --- a/comp/otelcol/configstore/def/go.mod +++ /dev/null @@ -1,88 +0,0 @@ -module github.com/DataDog/datadog-agent/comp/otelcol/configstore/def - -go 1.22.0 - -require ( - go.opentelemetry.io/collector/confmap v0.104.0 - go.opentelemetry.io/collector/otelcol v0.104.0 -) - -require ( - github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/hashicorp/go-version v1.7.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf/maps v0.1.1 // indirect - github.com/knadh/koanf/providers/confmap v0.1.0 // indirect - github.com/knadh/koanf/v2 v2.1.1 // indirect - github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/client_golang v1.19.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.54.0 // indirect - github.com/prometheus/procfs v0.15.0 // indirect - github.com/shirou/gopsutil/v4 v4.24.5 // indirect - github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/cobra v1.8.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect - github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.104.0 // indirect - go.opentelemetry.io/collector/component v0.104.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect - go.opentelemetry.io/collector/connector v0.104.0 // indirect - go.opentelemetry.io/collector/consumer v0.104.0 // indirect - go.opentelemetry.io/collector/exporter v0.104.0 // indirect - go.opentelemetry.io/collector/extension v0.104.0 // indirect - go.opentelemetry.io/collector/featuregate v1.11.0 // indirect - go.opentelemetry.io/collector/pdata v1.11.0 // indirect - go.opentelemetry.io/collector/processor v0.104.0 // indirect - go.opentelemetry.io/collector/receiver v0.104.0 // indirect - go.opentelemetry.io/collector/semconv v0.104.0 // indirect - go.opentelemetry.io/collector/service v0.104.0 // indirect - go.opentelemetry.io/contrib/config v0.7.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // 
indirect - go.opentelemetry.io/otel v1.27.0 // indirect - go.opentelemetry.io/otel/bridge/opencensus v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect - go.opentelemetry.io/otel/sdk v1.27.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect - go.opentelemetry.io/otel/trace v1.27.0 // indirect - go.opentelemetry.io/proto/otlp v1.2.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 // indirect - google.golang.org/grpc v1.64.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/comp/otelcol/configstore/def/go.sum b/comp/otelcol/configstore/def/go.sum deleted file mode 100644 index 4c8662c4b1dfa..0000000000000 --- a/comp/otelcol/configstore/def/go.sum +++ /dev/null @@ -1,328 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod 
h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp 
v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= -github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= -github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= -github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= -github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= -github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= -github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= -github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= -github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= -github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek= -github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= -github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.5 h1:gGsArG5K6vmsh5hcFOHaPm87UD003CaDMkAOweSQjhM= -github.com/shirou/gopsutil/v4 v4.24.5/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= -github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= -github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 
h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= -github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.104.0 h1:R3zjM4O3K3+ttzsjPV75P80xalxRbwYTURlK0ys7uyo= -go.opentelemetry.io/collector v0.104.0/go.mod h1:Tm6F3na9ajnOm6I5goU9dURKxq1fSBK1yA94nvUix3k= -go.opentelemetry.io/collector/component v0.104.0 h1:jqu/X9rnv8ha0RNZ1a9+x7OU49KwSMsPbOuIEykHuQE= -go.opentelemetry.io/collector/component v0.104.0/go.mod h1:1C7C0hMVSbXyY1ycCmaMUAR9fVwpgyiNQqxXtEWhVpw= -go.opentelemetry.io/collector/config/configauth v0.104.0 h1:ULtjugImijpKuLgGVt0E0HwiZT7+uDUEtMquh1ODB24= -go.opentelemetry.io/collector/config/configauth v0.104.0/go.mod h1:Til+nLLrQwwhgmfcGTX4ZRcNuMhdaWhBW1jH9DLTabQ= -go.opentelemetry.io/collector/config/configcompression v1.11.0 h1:oTwbcLh7mWHSDUIZXkRJVdNAMoBGS39XF68goTMOQq8= -go.opentelemetry.io/collector/config/configcompression v1.11.0/go.mod h1:6+m0GKCv7JKzaumn7u80A2dLNCuYf5wdR87HWreoBO0= -go.opentelemetry.io/collector/config/confighttp v0.104.0 h1:KSY0FSHSjuPyrR6iA2g5oFTozYFpYcy0ssJny8gTNTQ= -go.opentelemetry.io/collector/config/confighttp v0.104.0/go.mod h1:YgSXwuMYHANzzv+IBjHXaBMG/4G2mrseIpICHj+LB3U= -go.opentelemetry.io/collector/config/configopaque v1.11.0 h1:Pt06PXWVmRaiSX63mzwT8Z9SV/hOc6VHNZbfZ10YY4o= -go.opentelemetry.io/collector/config/configopaque v1.11.0/go.mod h1:0xURn2sOy5j4fbaocpEYfM97HPGsiffkkVudSPyTJlM= -go.opentelemetry.io/collector/config/configtelemetry v0.104.0 h1:eHv98XIhapZA8MgTiipvi+FDOXoFhCYOwyKReOt+E4E= -go.opentelemetry.io/collector/config/configtelemetry v0.104.0/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40= -go.opentelemetry.io/collector/config/configtls v0.104.0 h1:bMmLz2+r+REpO7cDOR+srOJHfitqTZfSZCffDpKfwWk= -go.opentelemetry.io/collector/config/configtls v0.104.0/go.mod h1:e33o7TWcKfe4ToLFyGISEPGMgp6ezf3yHRGY4gs9nKk= -go.opentelemetry.io/collector/config/internal v0.104.0 h1:h3OkxTfXWWrHRyPEGMpJb4fH+54puSBuzm6GQbuEZ2o= -go.opentelemetry.io/collector/config/internal v0.104.0/go.mod h1:KjH43jsAUFyZPeTOz7GrPORMQCK13wRMCyQpWk99gMo= -go.opentelemetry.io/collector/confmap v0.104.0 h1:d3yuwX+CHpoyCh0iMv3rqb/vwAekjSm4ZDL6UK1nZSA= -go.opentelemetry.io/collector/confmap v0.104.0/go.mod h1:F8Lue+tPPn2oldXcfqI75PPMJoyzgUsKVtM/uHZLA4w= -go.opentelemetry.io/collector/connector v0.104.0 h1:Y82ytwZZ+EruWafEebO0dgWMH+TdkcSONEqZ5bm9JYA= -go.opentelemetry.io/collector/connector v0.104.0/go.mod h1:78SEHel3B3taFnSBg/syW4OV9aU1Ec9KjgbgHf/L8JA= -go.opentelemetry.io/collector/consumer v0.104.0 h1:Z1ZjapFp5mUcbkGEL96ljpqLIUMhRgQQpYKkDRtxy+4= -go.opentelemetry.io/collector/consumer v0.104.0/go.mod h1:60zcIb0W9GW0z9uJCv6NmjpSbCfBOeRUyrtEwqK6Hzo= 
-go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBLd+JxEtAWo7JNbg= -go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ= -go.opentelemetry.io/collector/extension v0.104.0 h1:bftkgFMKya/QIwK+bOxEAPVs/TvTez+s1mlaiUznJkA= -go.opentelemetry.io/collector/extension v0.104.0/go.mod h1:x7K0KyM1JGrtLbafEbRoVp0VpGBHpyx9hu87bsja6S4= -go.opentelemetry.io/collector/extension/auth v0.104.0 h1:SelhccGCrqLThPlkbv6lbAowHsjgOTAWcAPz085IEC4= -go.opentelemetry.io/collector/extension/auth v0.104.0/go.mod h1:s3/C7LTSfa91QK0JPMTRIvH/gCv+a4DGiiNeTAX9OhI= -go.opentelemetry.io/collector/extension/zpagesextension v0.104.0 h1:rJ9Sw6DR27s6bW7lWBjJhjth5CXpltAHBKIgUFgVwFs= -go.opentelemetry.io/collector/extension/zpagesextension v0.104.0/go.mod h1:85Exj8r237PIvaXL1a/S0KeVNnm3kQNpVXtu0O2Zk5k= -go.opentelemetry.io/collector/featuregate v1.11.0 h1:Z7puIymKoQRm3oNM/NH8reWc2zRPz2PNaJvuokh0lQY= -go.opentelemetry.io/collector/featuregate v1.11.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= -go.opentelemetry.io/collector/otelcol v0.104.0 h1:RnMx7RaSFmX4dq/l3wbXWwcUnFK7RU19AM/0FbMr0Ig= -go.opentelemetry.io/collector/otelcol v0.104.0/go.mod h1:hWFRiHIKT3zbUx6SRevusPRa6mfm+70bPG5CK0glqSU= -go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE= -go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE= -go.opentelemetry.io/collector/pdata/pprofile v0.104.0 h1:MYOIHvPlKEJbWLiBKFQWGD0xd2u22xGVLt4jPbdxP4Y= -go.opentelemetry.io/collector/pdata/pprofile v0.104.0/go.mod h1:7WpyHk2wJZRx70CGkBio8klrYTTXASbyIhf+rH4FKnA= -go.opentelemetry.io/collector/pdata/testdata v0.104.0 h1:BKTZ7hIyAX5DMPecrXkVB2e86HwWtJyOlXn/5vSVXNw= -go.opentelemetry.io/collector/pdata/testdata v0.104.0/go.mod h1:3SnYKu8gLfxURJMWS/cFEUFs+jEKS6jvfqKXnOZsdkQ= -go.opentelemetry.io/collector/processor v0.104.0 h1:KSvMDu4DWmK1/k2z2rOzMtTvAa00jnTabtPEK9WOSYI= -go.opentelemetry.io/collector/processor v0.104.0/go.mod h1:qU2/xCCYdvVORkN6aq0H/WUWkvo505VGYg2eOwPvaTg= -go.opentelemetry.io/collector/receiver v0.104.0 h1:URL1ExkYYd+qbndm7CdGvI2mxzsv/pNfmwJ+1QSQ9/o= -go.opentelemetry.io/collector/receiver v0.104.0/go.mod h1:+enTCZQLf6dRRANWvykXEzrlRw2JDppXJtoYWd/Dd54= -go.opentelemetry.io/collector/semconv v0.104.0 h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k= -go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/collector/service v0.104.0 h1:DTpkoX4C6qiA3v3cfB2cHv/cH705o5JI9J3P77SFUrE= -go.opentelemetry.io/collector/service v0.104.0/go.mod h1:eq68zgpqRDYaVp60NeRu973J0rA5vZJkezfw/EzxLXc= -go.opentelemetry.io/contrib/config v0.7.0 h1:b1rK5tGTuhhPirJiMxOcyQfZs76j2VapY6ODn3b2Dbs= -go.opentelemetry.io/contrib/config v0.7.0/go.mod h1:8tdiFd8N5etOi3XzBmAoMxplEzI3TcL8dU5rM5/xcOQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E= -go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E= -go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk= -go.opentelemetry.io/otel v1.27.0 
h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 h1:bFgvUr3/O4PHj3VQcFEuYKvRZJX1SJDQ+11JXuSB3/w= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0/go.mod h1:xJntEd2KL6Qdg5lwp97HMLQDVeAhrYxmzFseAMDPQ8I= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 h1:CIHWikMsN3wO+wq1Tp5VGdVRTcON+DmOJSfDjXypKOc= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0/go.mod h1:TNupZ6cxqyFEpLXAZW7On+mLFL0/g0TE3unIYL91xWc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0/go.mod h1:KfQ1wpjf3zsHjzP149P4LyAwWRupc6c7t1ZJ9eXpKQM= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9FBx6cJaIC5hYx5Fe64nC8w5Cylt/0= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= -go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 
h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= -gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 h1:P8OJ/WCl/Xo4E4zoe4/bifHpSmmKwARqyqE4nW6J2GQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 h1:Q2RxlXqh1cgzzUgV261vBO2jI5R/3DD1J2pM0nI4NhU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod 
h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/comp/otelcol/configstore/fx/fx.go b/comp/otelcol/configstore/fx/fx.go deleted file mode 100644 index 2f89f318f0554..0000000000000 --- a/comp/otelcol/configstore/fx/fx.go +++ /dev/null @@ -1,23 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2024-present Datadog, Inc. - -// Package configstorefx provides fx access for the configstore component -package configstorefx - -import ( - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/def" - configstoreimpl "github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl" - "github.com/DataDog/datadog-agent/pkg/util/fxutil" -) - -// Module defines the fx options for this component. 
-func Module() fxutil.Module { - return fxutil.Component( - fxutil.ProvideComponentConstructor( - configstoreimpl.NewConfigStore, - ), - fxutil.ProvideOptional[configstore.Component](), - ) -} diff --git a/comp/otelcol/configstore/impl/configstore.go b/comp/otelcol/configstore/impl/configstore.go deleted file mode 100644 index 3c0922ee2b383..0000000000000 --- a/comp/otelcol/configstore/impl/configstore.go +++ /dev/null @@ -1,127 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2024-present Datadog, Inc. - -// Package configstoreimpl provides the implementation of the otel-agent configstore. -package configstoreimpl - -import ( - "context" - "fmt" - "sync" - - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/def" - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/otelcol" - "gopkg.in/yaml.v2" -) - -type configStoreImpl struct { - provided *otelcol.Config - enhanced *otelcol.Config - mu sync.RWMutex -} - -// NewConfigStore currently only supports a single URI in the uris slice, and this URI needs to be a file path. -func NewConfigStore() (configstore.Component, error) { - return &configStoreImpl{}, nil -} - -func (c *configStoreImpl) AddConfigs(providedCPS otelcol.ConfigProviderSettings, enhancedCPS otelcol.ConfigProviderSettings, factories otelcol.Factories) error { - // Provided - ocpProvided, err := otelcol.NewConfigProvider(providedCPS) - if err != nil { - return fmt.Errorf("failed to create configprovider: %w", err) - } - providedConf, err := ocpProvided.Get(context.Background(), factories) - if err != nil { - return err - } - c.addProvidedConf(providedConf) - - // Enhanced - ocpEnhanced, err := otelcol.NewConfigProvider(enhancedCPS) - if err != nil { - return fmt.Errorf("failed to create configprovider: %w", err) - } - enhancedConf, err := ocpEnhanced.Get(context.Background(), factories) - if err != nil { - return err - } - c.addEnhancedConf(enhancedConf) - - return nil -} - -// addProvidedConf stores the config into configStoreImpl. -func (c *configStoreImpl) addProvidedConf(config *otelcol.Config) { - c.mu.Lock() - defer c.mu.Unlock() - - c.provided = config -} - -// addEnhancedConf stores the config into configStoreImpl. -func (c *configStoreImpl) addEnhancedConf(config *otelcol.Config) { - c.mu.Lock() - defer c.mu.Unlock() - - c.enhanced = config -} - -// GetProvidedConf returns a string representing the enhanced collector configuration. -func (c *configStoreImpl) GetProvidedConf() (*confmap.Conf, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - conf := confmap.New() - err := conf.Marshal(c.provided) - if err != nil { - return nil, err - } - return conf, nil -} - -// GetEnhancedConf returns a string representing the enhanced collector configuration. -func (c *configStoreImpl) GetEnhancedConf() (*confmap.Conf, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - conf := confmap.New() - err := conf.Marshal(c.enhanced) - if err != nil { - return nil, err - } - return conf, nil -} - -// GetProvidedConf returns a string representing the enhanced collector configuration. -func (c *configStoreImpl) GetProvidedConfAsString() (string, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - return confToString(c.provided) -} - -// GetEnhancedConf returns a string representing the enhanced collector configuration. 
-func (c *configStoreImpl) GetEnhancedConfAsString() (string, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - return confToString(c.enhanced) -} - -func confToString(conf *otelcol.Config) (string, error) { - cfg := confmap.New() - err := cfg.Marshal(conf) - if err != nil { - return "", err - } - bytesConf, err := yaml.Marshal(cfg.ToStringMap()) - if err != nil { - return "", err - } - - return string(bytesConf), nil -} diff --git a/comp/otelcol/configstore/impl/go.mod b/comp/otelcol/configstore/impl/go.mod deleted file mode 100644 index 8b263d73c0d03..0000000000000 --- a/comp/otelcol/configstore/impl/go.mod +++ /dev/null @@ -1,94 +0,0 @@ -module github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl - -go 1.22.0 - -require ( - go.opentelemetry.io/collector/confmap v0.104.0 - go.opentelemetry.io/collector/otelcol v0.104.0 -) - -replace github.com/DataDog/datadog-agent/comp/otelcol/configstore/def => ../def - -require ( - github.com/DataDog/datadog-agent/comp/otelcol/configstore/def v0.56.0-rc.3 - gopkg.in/yaml.v2 v2.4.0 -) - -require ( - github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/hashicorp/go-version v1.7.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf v1.5.0 // indirect - github.com/knadh/koanf/v2 v2.1.1 // indirect - github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/client_golang v1.19.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.54.0 // indirect - github.com/prometheus/procfs v0.15.0 // indirect - github.com/shirou/gopsutil/v4 v4.24.5 // indirect - github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/cobra v1.8.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect - github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.104.0 // indirect - go.opentelemetry.io/collector/component v0.104.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect - go.opentelemetry.io/collector/connector v0.104.0 // indirect - go.opentelemetry.io/collector/consumer v0.104.0 // indirect - go.opentelemetry.io/collector/exporter v0.104.0 // indirect - go.opentelemetry.io/collector/extension v0.104.0 // indirect - go.opentelemetry.io/collector/featuregate v1.11.0 // indirect - go.opentelemetry.io/collector/pdata v1.11.0 // indirect - go.opentelemetry.io/collector/processor v0.104.0 // indirect - 
go.opentelemetry.io/collector/receiver v0.104.0 // indirect - go.opentelemetry.io/collector/semconv v0.104.0 // indirect - go.opentelemetry.io/collector/service v0.104.0 // indirect - go.opentelemetry.io/contrib/config v0.7.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect - go.opentelemetry.io/otel v1.27.0 // indirect - go.opentelemetry.io/otel/bridge/opencensus v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect - go.opentelemetry.io/otel/sdk v1.27.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect - go.opentelemetry.io/otel/trace v1.27.0 // indirect - go.opentelemetry.io/proto/otlp v1.2.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect - google.golang.org/grpc v1.64.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/comp/otelcol/configstore/impl/go.sum b/comp/otelcol/configstore/impl/go.sum deleted file mode 100644 index d3cae0a850882..0000000000000 --- a/comp/otelcol/configstore/impl/go.sum +++ /dev/null @@ -1,585 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= -github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= -github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= -github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= -github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= -github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= 
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf 
v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= -github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= -github.com/hashicorp/consul/sdk v0.8.0/go.mod 
h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= -github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= -github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= -github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= -github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= 
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs= -github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= -github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= -github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod 
h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= -github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek= -github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk= -github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= 
-github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shirou/gopsutil/v4 v4.24.5 h1:gGsArG5K6vmsh5hcFOHaPm87UD003CaDMkAOweSQjhM= -github.com/shirou/gopsutil/v4 v4.24.5/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= -github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= -github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark 
v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= -github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= -go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.104.0 h1:R3zjM4O3K3+ttzsjPV75P80xalxRbwYTURlK0ys7uyo= -go.opentelemetry.io/collector v0.104.0/go.mod h1:Tm6F3na9ajnOm6I5goU9dURKxq1fSBK1yA94nvUix3k= -go.opentelemetry.io/collector/component v0.104.0 h1:jqu/X9rnv8ha0RNZ1a9+x7OU49KwSMsPbOuIEykHuQE= -go.opentelemetry.io/collector/component v0.104.0/go.mod h1:1C7C0hMVSbXyY1ycCmaMUAR9fVwpgyiNQqxXtEWhVpw= -go.opentelemetry.io/collector/config/configauth v0.104.0 h1:ULtjugImijpKuLgGVt0E0HwiZT7+uDUEtMquh1ODB24= -go.opentelemetry.io/collector/config/configauth v0.104.0/go.mod h1:Til+nLLrQwwhgmfcGTX4ZRcNuMhdaWhBW1jH9DLTabQ= -go.opentelemetry.io/collector/config/configcompression v1.11.0 h1:oTwbcLh7mWHSDUIZXkRJVdNAMoBGS39XF68goTMOQq8= -go.opentelemetry.io/collector/config/configcompression v1.11.0/go.mod h1:6+m0GKCv7JKzaumn7u80A2dLNCuYf5wdR87HWreoBO0= -go.opentelemetry.io/collector/config/confighttp v0.104.0 h1:KSY0FSHSjuPyrR6iA2g5oFTozYFpYcy0ssJny8gTNTQ= -go.opentelemetry.io/collector/config/confighttp v0.104.0/go.mod h1:YgSXwuMYHANzzv+IBjHXaBMG/4G2mrseIpICHj+LB3U= -go.opentelemetry.io/collector/config/configopaque v1.11.0 h1:Pt06PXWVmRaiSX63mzwT8Z9SV/hOc6VHNZbfZ10YY4o= -go.opentelemetry.io/collector/config/configopaque v1.11.0/go.mod h1:0xURn2sOy5j4fbaocpEYfM97HPGsiffkkVudSPyTJlM= -go.opentelemetry.io/collector/config/configtelemetry v0.104.0 h1:eHv98XIhapZA8MgTiipvi+FDOXoFhCYOwyKReOt+E4E= -go.opentelemetry.io/collector/config/configtelemetry v0.104.0/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40= -go.opentelemetry.io/collector/config/configtls v0.104.0 h1:bMmLz2+r+REpO7cDOR+srOJHfitqTZfSZCffDpKfwWk= -go.opentelemetry.io/collector/config/configtls v0.104.0/go.mod h1:e33o7TWcKfe4ToLFyGISEPGMgp6ezf3yHRGY4gs9nKk= -go.opentelemetry.io/collector/config/internal v0.104.0 h1:h3OkxTfXWWrHRyPEGMpJb4fH+54puSBuzm6GQbuEZ2o= -go.opentelemetry.io/collector/config/internal v0.104.0/go.mod h1:KjH43jsAUFyZPeTOz7GrPORMQCK13wRMCyQpWk99gMo= -go.opentelemetry.io/collector/confmap v0.104.0 h1:d3yuwX+CHpoyCh0iMv3rqb/vwAekjSm4ZDL6UK1nZSA= -go.opentelemetry.io/collector/confmap v0.104.0/go.mod h1:F8Lue+tPPn2oldXcfqI75PPMJoyzgUsKVtM/uHZLA4w= -go.opentelemetry.io/collector/connector v0.104.0 h1:Y82ytwZZ+EruWafEebO0dgWMH+TdkcSONEqZ5bm9JYA= -go.opentelemetry.io/collector/connector v0.104.0/go.mod h1:78SEHel3B3taFnSBg/syW4OV9aU1Ec9KjgbgHf/L8JA= -go.opentelemetry.io/collector/consumer v0.104.0 h1:Z1ZjapFp5mUcbkGEL96ljpqLIUMhRgQQpYKkDRtxy+4= -go.opentelemetry.io/collector/consumer v0.104.0/go.mod h1:60zcIb0W9GW0z9uJCv6NmjpSbCfBOeRUyrtEwqK6Hzo= -go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBLd+JxEtAWo7JNbg= -go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ= -go.opentelemetry.io/collector/extension v0.104.0 h1:bftkgFMKya/QIwK+bOxEAPVs/TvTez+s1mlaiUznJkA= -go.opentelemetry.io/collector/extension 
v0.104.0/go.mod h1:x7K0KyM1JGrtLbafEbRoVp0VpGBHpyx9hu87bsja6S4= -go.opentelemetry.io/collector/extension/auth v0.104.0 h1:SelhccGCrqLThPlkbv6lbAowHsjgOTAWcAPz085IEC4= -go.opentelemetry.io/collector/extension/auth v0.104.0/go.mod h1:s3/C7LTSfa91QK0JPMTRIvH/gCv+a4DGiiNeTAX9OhI= -go.opentelemetry.io/collector/extension/zpagesextension v0.104.0 h1:rJ9Sw6DR27s6bW7lWBjJhjth5CXpltAHBKIgUFgVwFs= -go.opentelemetry.io/collector/extension/zpagesextension v0.104.0/go.mod h1:85Exj8r237PIvaXL1a/S0KeVNnm3kQNpVXtu0O2Zk5k= -go.opentelemetry.io/collector/featuregate v1.11.0 h1:Z7puIymKoQRm3oNM/NH8reWc2zRPz2PNaJvuokh0lQY= -go.opentelemetry.io/collector/featuregate v1.11.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= -go.opentelemetry.io/collector/otelcol v0.104.0 h1:RnMx7RaSFmX4dq/l3wbXWwcUnFK7RU19AM/0FbMr0Ig= -go.opentelemetry.io/collector/otelcol v0.104.0/go.mod h1:hWFRiHIKT3zbUx6SRevusPRa6mfm+70bPG5CK0glqSU= -go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE= -go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE= -go.opentelemetry.io/collector/pdata/pprofile v0.104.0 h1:MYOIHvPlKEJbWLiBKFQWGD0xd2u22xGVLt4jPbdxP4Y= -go.opentelemetry.io/collector/pdata/pprofile v0.104.0/go.mod h1:7WpyHk2wJZRx70CGkBio8klrYTTXASbyIhf+rH4FKnA= -go.opentelemetry.io/collector/pdata/testdata v0.104.0 h1:BKTZ7hIyAX5DMPecrXkVB2e86HwWtJyOlXn/5vSVXNw= -go.opentelemetry.io/collector/pdata/testdata v0.104.0/go.mod h1:3SnYKu8gLfxURJMWS/cFEUFs+jEKS6jvfqKXnOZsdkQ= -go.opentelemetry.io/collector/processor v0.104.0 h1:KSvMDu4DWmK1/k2z2rOzMtTvAa00jnTabtPEK9WOSYI= -go.opentelemetry.io/collector/processor v0.104.0/go.mod h1:qU2/xCCYdvVORkN6aq0H/WUWkvo505VGYg2eOwPvaTg= -go.opentelemetry.io/collector/receiver v0.104.0 h1:URL1ExkYYd+qbndm7CdGvI2mxzsv/pNfmwJ+1QSQ9/o= -go.opentelemetry.io/collector/receiver v0.104.0/go.mod h1:+enTCZQLf6dRRANWvykXEzrlRw2JDppXJtoYWd/Dd54= -go.opentelemetry.io/collector/semconv v0.104.0 h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k= -go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/collector/service v0.104.0 h1:DTpkoX4C6qiA3v3cfB2cHv/cH705o5JI9J3P77SFUrE= -go.opentelemetry.io/collector/service v0.104.0/go.mod h1:eq68zgpqRDYaVp60NeRu973J0rA5vZJkezfw/EzxLXc= -go.opentelemetry.io/contrib/config v0.7.0 h1:b1rK5tGTuhhPirJiMxOcyQfZs76j2VapY6ODn3b2Dbs= -go.opentelemetry.io/contrib/config v0.7.0/go.mod h1:8tdiFd8N5etOi3XzBmAoMxplEzI3TcL8dU5rM5/xcOQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E= -go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E= -go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk= 
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 h1:bFgvUr3/O4PHj3VQcFEuYKvRZJX1SJDQ+11JXuSB3/w= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0/go.mod h1:xJntEd2KL6Qdg5lwp97HMLQDVeAhrYxmzFseAMDPQ8I= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 h1:CIHWikMsN3wO+wq1Tp5VGdVRTcON+DmOJSfDjXypKOc= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0/go.mod h1:TNupZ6cxqyFEpLXAZW7On+mLFL0/g0TE3unIYL91xWc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0/go.mod h1:KfQ1wpjf3zsHjzP149P4LyAwWRupc6c7t1ZJ9eXpKQM= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9FBx6cJaIC5hYx5Fe64nC8w5Cylt/0= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= -go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod 
h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= -gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d h1:Aqf0fiIdUQEj0Gn9mKFFXoQfTTEaNopWpfVyYADxiSg= -google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Od4k8V1LQSizPRUK4OzZ7TBE/20k+jPczUDAEyvn69Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/comp/otelcol/converter/impl/extensions.go b/comp/otelcol/converter/impl/extensions.go index 1d3911210bf62..7b9e878a7b0e9 100644 --- a/comp/otelcol/converter/impl/extensions.go +++ b/comp/otelcol/converter/impl/extensions.go @@ -7,7 +7,6 @@ package converterimpl import ( - ddextension "github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl" 
"go.opentelemetry.io/collector/confmap" ) @@ -30,7 +29,7 @@ var ( healthCheckConfig any // datadog - datadogName = ddextension.Type.String() + datadogName = "ddflare" datadogEnhancedName = datadogName + "/" + ddAutoconfiguredSuffix datadogConfig any diff --git a/comp/otelcol/converter/impl/go.mod b/comp/otelcol/converter/impl/go.mod index cd6b7408cf61a..8647fcd2669f7 100644 --- a/comp/otelcol/converter/impl/go.mod +++ b/comp/otelcol/converter/impl/go.mod @@ -5,22 +5,25 @@ go 1.22.0 replace ( github.com/DataDog/datadog-agent/cmd/agent/common/path => ../../../../cmd/agent/common/path github.com/DataDog/datadog-agent/comp/api/api/def => ../../../../comp/api/api/def + github.com/DataDog/datadog-agent/comp/api/authtoken => ../../../../comp/api/authtoken github.com/DataDog/datadog-agent/comp/core/config => ../../../../comp/core/config github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../../../comp/core/flare/builder github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../../comp/core/flare/types github.com/DataDog/datadog-agent/comp/core/secrets => ../../../../comp/core/secrets github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../../comp/core/telemetry github.com/DataDog/datadog-agent/comp/def => ../../../../comp/def - github.com/DataDog/datadog-agent/comp/otelcol/configstore/def => ../../configstore/def/ - github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl => ../../configstore/impl/ github.com/DataDog/datadog-agent/comp/otelcol/converter/def => ../def github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def => ../../ddflareextension/def/ github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl => ../../ddflareextension/impl/ + github.com/DataDog/datadog-agent/pkg/api => ../../../../pkg/api github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../../pkg/collector/check/defaults github.com/DataDog/datadog-agent/pkg/config/env => ../../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../../pkg/config/teeconfig + github.com/DataDog/datadog-agent/pkg/config/utils => ../../../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../pkg/util/executable github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../../pkg/util/filesystem github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil @@ -33,12 +36,12 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../../pkg/util/system/socket github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../../pkg/util/testutil github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../pkg/util/winutil + github.com/DataDog/datadog-agent/pkg/version => ../../../../pkg/version ) require ( - github.com/DataDog/datadog-agent/comp/core/config v0.55.2 + github.com/DataDog/datadog-agent/comp/core/config v0.56.2 github.com/DataDog/datadog-agent/comp/otelcol/converter/def v0.56.0-rc.3 - github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl v0.0.0-00010101000000-000000000000 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/confmap v0.104.0 
 	go.opentelemetry.io/collector/confmap/converter/expandconverter v0.104.0
@@ -51,140 +54,77 @@ require (
 )
 require (
-	github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/comp/otelcol/configstore/def v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def v0.0.0-00010101000000-000000000000 // indirect
-	github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect
+	github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/comp/core/secrets v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/comp/def v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/pkg/config/env v0.56.2 // indirect
 	github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect
-	github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect
+	github.com/DataDog/datadog-agent/pkg/config/model v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect
+	github.com/DataDog/datadog-agent/pkg/config/setup v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect
+	github.com/DataDog/datadog-agent/pkg/util/executable v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/pkg/util/log v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/pkg/util/optional v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/pkg/util/system v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.2 // indirect
 	github.com/DataDog/viper v1.13.5 // indirect
-	github.com/Microsoft/go-winio v0.6.1 // indirect
-	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
-	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/Microsoft/go-winio v0.6.2 // indirect
 	github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
-	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
-	github.com/go-logr/logr v1.4.2 // indirect
-	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-ole/go-ole v1.3.0 // indirect
 	github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect
-	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/golang/snappy v0.0.4 // indirect
-	github.com/google/uuid v1.6.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
 	github.com/hashicorp/go-version v1.7.0 // indirect
-	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
 	github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
-	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
-	github.com/klauspost/compress v1.17.9 // indirect
 	github.com/knadh/koanf/maps v0.1.1 // indirect
 	github.com/knadh/koanf/providers/confmap v0.1.0 // indirect
 	github.com/knadh/koanf/v2 v2.1.1 // indirect
 	github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect
-	github.com/magiconair/properties v1.8.1 // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
-	github.com/mitchellh/mapstructure v1.1.2 // indirect
+	github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
-	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
-	github.com/pelletier/go-toml v1.2.0 // indirect
+	github.com/pelletier/go-toml v1.9.5 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
-	github.com/prometheus/client_golang v1.19.1 // indirect
-	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.54.0 // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
-	github.com/rs/cors v1.11.0 // indirect
-	github.com/shirou/gopsutil/v3 v3.23.12 // indirect
-	github.com/shirou/gopsutil/v4 v4.24.5 // indirect
+	github.com/prometheus/client_golang v1.20.2 // indirect
+	github.com/rogpeppe/go-internal v1.12.0 // indirect
+	github.com/shirou/gopsutil/v3 v3.24.5 // indirect
 	github.com/shoenig/go-m1cpu v0.1.6 // indirect
-	github.com/spf13/afero v1.1.2 // indirect
-	github.com/spf13/cast v1.5.1 // indirect
+	github.com/shoenig/test v1.7.1 // indirect
+	github.com/spf13/afero v1.11.0 // indirect
+	github.com/spf13/cast v1.7.0 // indirect
 	github.com/spf13/cobra v1.8.1 // indirect
-	github.com/spf13/jwalterweatherman v1.0.0 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/tklauser/go-sysconf v0.3.14 // indirect
 	github.com/tklauser/numcpus v0.8.0 // indirect
 	github.com/yusufpapurcu/wmi v1.2.4 // indirect
-	go.opencensus.io v0.24.0 // indirect
 	go.opentelemetry.io/collector v0.104.0 // indirect
-	go.opentelemetry.io/collector/component v0.104.0 // indirect
-	go.opentelemetry.io/collector/config/configauth v0.104.0 // indirect
-	go.opentelemetry.io/collector/config/configcompression v1.11.0 // indirect
-	go.opentelemetry.io/collector/config/confighttp v0.104.0 // indirect
-	go.opentelemetry.io/collector/config/configopaque v1.11.0 // indirect
-	go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect
-	go.opentelemetry.io/collector/config/configtls v0.104.0 // indirect
-	go.opentelemetry.io/collector/config/internal v0.104.0 // indirect
-	go.opentelemetry.io/collector/connector v0.104.0 // indirect
-	go.opentelemetry.io/collector/consumer v0.104.0 // indirect
-	go.opentelemetry.io/collector/exporter v0.104.0 // indirect
-	go.opentelemetry.io/collector/extension v0.104.0 // indirect
-	go.opentelemetry.io/collector/extension/auth v0.104.0 // indirect
 	go.opentelemetry.io/collector/featuregate v1.11.0 // indirect
-	go.opentelemetry.io/collector/otelcol v0.104.0 // indirect
-	go.opentelemetry.io/collector/pdata v1.11.0 // indirect
-	go.opentelemetry.io/collector/processor v0.104.0 // indirect
-	go.opentelemetry.io/collector/receiver v0.104.0 // indirect
-	go.opentelemetry.io/collector/semconv v0.104.0 // indirect
-	go.opentelemetry.io/collector/service v0.104.0 // indirect
-	go.opentelemetry.io/contrib/config v0.7.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect
-	go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect
-	go.opentelemetry.io/otel v1.27.0 // indirect
-	go.opentelemetry.io/otel/bridge/opencensus v1.27.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect
-	go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect
-	go.opentelemetry.io/otel/metric v1.27.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.27.0 // indirect
-	go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect
-	go.opentelemetry.io/otel/trace v1.27.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.2.0 // indirect
+	go.opentelemetry.io/otel/metric v1.28.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.28.0 // indirect
 	go.uber.org/atomic v1.11.0 // indirect
 	go.uber.org/dig v1.18.0 // indirect
 	go.uber.org/fx v1.22.2 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
-	golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect
-	golang.org/x/mod v0.20.0 // indirect
-	golang.org/x/net v0.28.0 // indirect
-	golang.org/x/sync v0.8.0 // indirect
-	golang.org/x/sys v0.24.0 // indirect
-	golang.org/x/text v0.17.0 // indirect
-	golang.org/x/tools v0.24.0 // indirect
-	gonum.org/v1/gonum v0.15.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect
-	google.golang.org/grpc v1.64.0 // indirect
-
google.golang.org/protobuf v1.34.2 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/otelcol/converter/impl/go.sum b/comp/otelcol/converter/impl/go.sum index 6e2637ed12906..b2d389ba80705 100644 --- a/comp/otelcol/converter/impl/go.sum +++ b/comp/otelcol/converter/impl/go.sum @@ -1,13 +1,10 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/participle/v2 v2.1.1 h1:hrjKESvSqGHzRb4yW1ciisFJ4p3MGYih6icjJvbsmV8= -github.com/alecthomas/participle/v2 v2.1.1/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -18,9 +15,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= @@ -28,7 +24,6 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.3/go.mod 
h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -44,14 +39,10 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= -github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= @@ -60,7 +51,6 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -71,46 +61,26 @@ github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -120,37 +90,25 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9G github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= -github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= -github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= -github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= -github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= 
github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= @@ -166,70 +124,46 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lightstep/go-expohisto v1.0.0 h1:UPtTS1rGdtehbbAF7o/dhkWLTDI73UifG8LbfQI7cA4= -github.com/lightstep/go-expohisto v1.0.0/go.mod h1:xDXD0++Mu2FOaItXtdDfksfgxfV0z1TMPa+e/EUd0cs= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= -github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/mostynb/go-grpc-compression v1.2.3 h1:42/BKWMy0KEJGSdWvzqIyOZ95YcR9mLPqKctH7Uo//I= -github.com/mostynb/go-grpc-compression v1.2.3/go.mod h1:AghIxF3P57umzqM9yz795+y1Vjs47Km/Y2FE6ouQ7Lg= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.104.0 h1:6dvpPt8pCcV+TfMnnanFk2NQYf9HN1voSS9iIHdW+L8= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.104.0/go.mod h1:MfSM6mt9qH3vHCaj2rlX6IY/7fN+zCLzNJC25XG9rNU= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.104.0 h1:SveJtKEP2pXyCbucjrDzbBGQUUgrU+vBMTyUgy0tplc= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.104.0/go.mod h1:HdVNjnRruSyRiqXvPBy/ZVumw7zjegmoJmFRgtBnaQU= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.104.0 h1:dcs3PHXBShL5+DWmDrNXnESlehQjRjIaVE84GPyZL5E= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.104.0/go.mod h1:Vh707OU/o72qqlDGS+8WVkMCTIlmiTfy3k6PQeq/tgY= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.104.0 h1:4ke4j/y7AQnRAyYveB+KGcdjVYEKVrwTxc3BDHagdd0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.104.0/go.mod h1:I2zX9YBggIum9LAHXN1DqqbYOENrHXbXdkXouhwVCHw= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.104.0 h1:/koTWTWCFF7tBYkDX5UzCaEc/ceTU8jij/Yzuj0So3M= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.104.0/go.mod h1:KWVekIHTPScOrLKVYOiijxfEdGK5OBhD4EFNBh96ESg= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.104.0 h1:4dU16tXhXWUfOYHoDtpEJHYze1ltgMFWvD1jWVeARRI= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.104.0/go.mod h1:poM/ch3rxaWlkiGV3ohdEDALhfwx6jaKd1z7xk6iY0o= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.104.0 h1:dOPRk39L5bwQNbxJ7mSUyHan0un/r9DV9X7G+YrktGk= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.104.0/go.mod h1:nyUlZ88VgBDoA9SfmUs0RcsVzqts9z0PpLxjFZPjD3w= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.104.0 h1:4ES79GC+1fyDlLmC2ASM7MpKGLx1LIBpL8wE7G3zzSA= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.104.0/go.mod h1:h5v/Xn0jreStYi9nyPHjwfYseH8Xe3DznsUNS5R4Oqg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.104.0 h1:Pl4rXXpRG/xJuNWUS3I/w1jViHcrssMf47bGX/Ug/KY= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.104.0/go.mod h1:tP4dyc5+g/qoXYb8lmNj+y+Nhphn4MkL23/np0Zhx2g= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.104.0 h1:Vwkk+0+cppH+TrmdiVFWcshhdvh2g2IZEj16V8SLjLw= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.104.0/go.mod h1:QmV2JbLC0lzzi0hMUKv5hJ824wdzvYInjVJsphQQ5Uo= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod 
h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= +github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -240,8 +174,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= -github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -254,16 +188,11 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/cors v1.11.0 
h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= -github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= -github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= -github.com/shirou/gopsutil/v4 v4.24.5 h1:gGsArG5K6vmsh5hcFOHaPm87UD003CaDMkAOweSQjhM= -github.com/shirou/gopsutil/v4 v4.24.5/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= +github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= +github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -272,39 +201,31 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= -github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod 
h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tilinna/clock v1.1.0 h1:6IQQQCo6KoBxVudv6gwtY8o4eDfhHo8ojA5dP0MfhSs= -github.com/tilinna/clock v1.1.0/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -314,38 +235,11 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/collector v0.104.0 h1:R3zjM4O3K3+ttzsjPV75P80xalxRbwYTURlK0ys7uyo= go.opentelemetry.io/collector v0.104.0/go.mod h1:Tm6F3na9ajnOm6I5goU9dURKxq1fSBK1yA94nvUix3k= -go.opentelemetry.io/collector/component v0.104.0 h1:jqu/X9rnv8ha0RNZ1a9+x7OU49KwSMsPbOuIEykHuQE= -go.opentelemetry.io/collector/component v0.104.0/go.mod h1:1C7C0hMVSbXyY1ycCmaMUAR9fVwpgyiNQqxXtEWhVpw= -go.opentelemetry.io/collector/config/configauth v0.104.0 h1:ULtjugImijpKuLgGVt0E0HwiZT7+uDUEtMquh1ODB24= -go.opentelemetry.io/collector/config/configauth v0.104.0/go.mod 
h1:Til+nLLrQwwhgmfcGTX4ZRcNuMhdaWhBW1jH9DLTabQ= -go.opentelemetry.io/collector/config/configcompression v1.11.0 h1:oTwbcLh7mWHSDUIZXkRJVdNAMoBGS39XF68goTMOQq8= -go.opentelemetry.io/collector/config/configcompression v1.11.0/go.mod h1:6+m0GKCv7JKzaumn7u80A2dLNCuYf5wdR87HWreoBO0= -go.opentelemetry.io/collector/config/configgrpc v0.104.0 h1:E3RtqryQPOm/trJmhlJZj6cCqJNKgv9fOEQvSEpzsFM= -go.opentelemetry.io/collector/config/configgrpc v0.104.0/go.mod h1:tu3ifnJ5pv+4rZcaqNWfvVLjNKb8icSPoClN3THN8PU= -go.opentelemetry.io/collector/config/confighttp v0.104.0 h1:KSY0FSHSjuPyrR6iA2g5oFTozYFpYcy0ssJny8gTNTQ= -go.opentelemetry.io/collector/config/confighttp v0.104.0/go.mod h1:YgSXwuMYHANzzv+IBjHXaBMG/4G2mrseIpICHj+LB3U= -go.opentelemetry.io/collector/config/confignet v0.104.0 h1:i7AOTJf4EQox3SEt1YtQFQR+BwXr3v5D9x3Ai9/ovy8= -go.opentelemetry.io/collector/config/confignet v0.104.0/go.mod h1:pfOrCTfSZEB6H2rKtx41/3RN4dKs+X2EKQbw3MGRh0E= -go.opentelemetry.io/collector/config/configopaque v1.11.0 h1:Pt06PXWVmRaiSX63mzwT8Z9SV/hOc6VHNZbfZ10YY4o= -go.opentelemetry.io/collector/config/configopaque v1.11.0/go.mod h1:0xURn2sOy5j4fbaocpEYfM97HPGsiffkkVudSPyTJlM= -go.opentelemetry.io/collector/config/configretry v1.11.0 h1:UdEDD0ThxPU7+n2EiKJxVTvDCGygXu9hTfT6LOQv9DY= -go.opentelemetry.io/collector/config/configretry v1.11.0/go.mod h1:P+RA0IA+QoxnDn4072uyeAk1RIoYiCbxYsjpKX5eFC4= -go.opentelemetry.io/collector/config/configtelemetry v0.104.0 h1:eHv98XIhapZA8MgTiipvi+FDOXoFhCYOwyKReOt+E4E= -go.opentelemetry.io/collector/config/configtelemetry v0.104.0/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40= -go.opentelemetry.io/collector/config/configtls v0.104.0 h1:bMmLz2+r+REpO7cDOR+srOJHfitqTZfSZCffDpKfwWk= -go.opentelemetry.io/collector/config/configtls v0.104.0/go.mod h1:e33o7TWcKfe4ToLFyGISEPGMgp6ezf3yHRGY4gs9nKk= -go.opentelemetry.io/collector/config/internal v0.104.0 h1:h3OkxTfXWWrHRyPEGMpJb4fH+54puSBuzm6GQbuEZ2o= -go.opentelemetry.io/collector/config/internal v0.104.0/go.mod h1:KjH43jsAUFyZPeTOz7GrPORMQCK13wRMCyQpWk99gMo= go.opentelemetry.io/collector/confmap v0.104.0 h1:d3yuwX+CHpoyCh0iMv3rqb/vwAekjSm4ZDL6UK1nZSA= go.opentelemetry.io/collector/confmap v0.104.0/go.mod h1:F8Lue+tPPn2oldXcfqI75PPMJoyzgUsKVtM/uHZLA4w= go.opentelemetry.io/collector/confmap/converter/expandconverter v0.104.0 h1:7BhJk71V8xhm8wUpuHG4CVRAPu8JajKj8VmGZ6zS7SA= @@ -360,86 +254,18 @@ go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.104.0 h1:y07I19l go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.104.0/go.mod h1:WV1HOa0z3Ln5ZkwEW7Cm2pCHkfzYY9kBe0dLy8DqeYA= go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.104.0 h1:itBGhyEbX+iz8kz3nc4PYxQx4bL7y87xXNUcGnbKPuY= go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.104.0/go.mod h1:iPVsTBkRFHZ21UEfSGWk8c4maOzTp6BWWpTk+l6PjJI= -go.opentelemetry.io/collector/connector v0.104.0 h1:Y82ytwZZ+EruWafEebO0dgWMH+TdkcSONEqZ5bm9JYA= -go.opentelemetry.io/collector/connector v0.104.0/go.mod h1:78SEHel3B3taFnSBg/syW4OV9aU1Ec9KjgbgHf/L8JA= -go.opentelemetry.io/collector/consumer v0.104.0 h1:Z1ZjapFp5mUcbkGEL96ljpqLIUMhRgQQpYKkDRtxy+4= -go.opentelemetry.io/collector/consumer v0.104.0/go.mod h1:60zcIb0W9GW0z9uJCv6NmjpSbCfBOeRUyrtEwqK6Hzo= -go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBLd+JxEtAWo7JNbg= -go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ= -go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 h1:EFOdhnc2yGhqou0Tud1HsM7fsgWo/H3tdQhYYytDprQ= 
-go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0/go.mod h1:fAF7Q3Xh0OkxYWUycdrNNDXkyz3nhHIRKDkez0aQ6zg= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 h1:JkNCOj7DdyJhcYIaRqtS/X+YtAPRjE4pcruyY6LoM7c= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0/go.mod h1:6rs4Xugs7tIC3IFbAC+fj56zLiVc7osXC5UTjk/Mkw4= -go.opentelemetry.io/collector/extension v0.104.0 h1:bftkgFMKya/QIwK+bOxEAPVs/TvTez+s1mlaiUznJkA= -go.opentelemetry.io/collector/extension v0.104.0/go.mod h1:x7K0KyM1JGrtLbafEbRoVp0VpGBHpyx9hu87bsja6S4= -go.opentelemetry.io/collector/extension/auth v0.104.0 h1:SelhccGCrqLThPlkbv6lbAowHsjgOTAWcAPz085IEC4= -go.opentelemetry.io/collector/extension/auth v0.104.0/go.mod h1:s3/C7LTSfa91QK0JPMTRIvH/gCv+a4DGiiNeTAX9OhI= -go.opentelemetry.io/collector/extension/zpagesextension v0.104.0 h1:rJ9Sw6DR27s6bW7lWBjJhjth5CXpltAHBKIgUFgVwFs= -go.opentelemetry.io/collector/extension/zpagesextension v0.104.0/go.mod h1:85Exj8r237PIvaXL1a/S0KeVNnm3kQNpVXtu0O2Zk5k= go.opentelemetry.io/collector/featuregate v1.11.0 h1:Z7puIymKoQRm3oNM/NH8reWc2zRPz2PNaJvuokh0lQY= go.opentelemetry.io/collector/featuregate v1.11.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= -go.opentelemetry.io/collector/otelcol v0.104.0 h1:RnMx7RaSFmX4dq/l3wbXWwcUnFK7RU19AM/0FbMr0Ig= -go.opentelemetry.io/collector/otelcol v0.104.0/go.mod h1:hWFRiHIKT3zbUx6SRevusPRa6mfm+70bPG5CK0glqSU= -go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE= -go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE= -go.opentelemetry.io/collector/pdata/pprofile v0.104.0 h1:MYOIHvPlKEJbWLiBKFQWGD0xd2u22xGVLt4jPbdxP4Y= -go.opentelemetry.io/collector/pdata/pprofile v0.104.0/go.mod h1:7WpyHk2wJZRx70CGkBio8klrYTTXASbyIhf+rH4FKnA= -go.opentelemetry.io/collector/pdata/testdata v0.104.0 h1:BKTZ7hIyAX5DMPecrXkVB2e86HwWtJyOlXn/5vSVXNw= -go.opentelemetry.io/collector/pdata/testdata v0.104.0/go.mod h1:3SnYKu8gLfxURJMWS/cFEUFs+jEKS6jvfqKXnOZsdkQ= -go.opentelemetry.io/collector/processor v0.104.0 h1:KSvMDu4DWmK1/k2z2rOzMtTvAa00jnTabtPEK9WOSYI= -go.opentelemetry.io/collector/processor v0.104.0/go.mod h1:qU2/xCCYdvVORkN6aq0H/WUWkvo505VGYg2eOwPvaTg= -go.opentelemetry.io/collector/processor/batchprocessor v0.104.0 h1:6xXvHYkPjwM1zdzliDM2H/omTGgIOkY96JTCln7CFZQ= -go.opentelemetry.io/collector/processor/batchprocessor v0.104.0/go.mod h1:f1VfVdiOlqtJDAvQy8YONEee19nJ3haxNeiMPy59w8M= -go.opentelemetry.io/collector/receiver v0.104.0 h1:URL1ExkYYd+qbndm7CdGvI2mxzsv/pNfmwJ+1QSQ9/o= -go.opentelemetry.io/collector/receiver v0.104.0/go.mod h1:+enTCZQLf6dRRANWvykXEzrlRw2JDppXJtoYWd/Dd54= -go.opentelemetry.io/collector/receiver/nopreceiver v0.104.0 h1:xkfiTIGEXMXosYbZe8C8tIEZiw+gEL8QhCxz8slSYcM= -go.opentelemetry.io/collector/receiver/nopreceiver v0.104.0/go.mod h1:9vZPqdvOBDh9fKugWiv8WIINkF+TFpOw7RhvZxctZ9w= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.104.0 h1:t9cACuSc7kY09guws7VyB/z9QnG7/zWLC1NQ29WH4+o= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.104.0/go.mod h1:sPIIO4F6uit1i/XQgfe2WryvdO5Hr16bQgZTaXcR8mM= -go.opentelemetry.io/collector/semconv v0.104.0 h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k= -go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/collector/service v0.104.0 h1:DTpkoX4C6qiA3v3cfB2cHv/cH705o5JI9J3P77SFUrE= -go.opentelemetry.io/collector/service v0.104.0/go.mod h1:eq68zgpqRDYaVp60NeRu973J0rA5vZJkezfw/EzxLXc= -go.opentelemetry.io/contrib/config v0.7.0 
h1:b1rK5tGTuhhPirJiMxOcyQfZs76j2VapY6ODn3b2Dbs= -go.opentelemetry.io/contrib/config v0.7.0/go.mod h1:8tdiFd8N5etOi3XzBmAoMxplEzI3TcL8dU5rM5/xcOQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E= -go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E= -go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 h1:bFgvUr3/O4PHj3VQcFEuYKvRZJX1SJDQ+11JXuSB3/w= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0/go.mod h1:xJntEd2KL6Qdg5lwp97HMLQDVeAhrYxmzFseAMDPQ8I= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 h1:CIHWikMsN3wO+wq1Tp5VGdVRTcON+DmOJSfDjXypKOc= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0/go.mod h1:TNupZ6cxqyFEpLXAZW7On+mLFL0/g0TE3unIYL91xWc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0/go.mod h1:KfQ1wpjf3zsHjzP149P4LyAwWRupc6c7t1ZJ9eXpKQM= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9FBx6cJaIC5hYx5Fe64nC8w5Cylt/0= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk 
v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -462,20 +288,14 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod 
v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -488,19 +308,12 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -511,23 +324,17 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 
-golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -537,46 +344,18 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= -gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod 
h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d h1:Aqf0fiIdUQEj0Gn9mKFFXoQfTTEaNopWpfVyYADxiSg= -google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Od4k8V1LQSizPRUK4OzZ7TBE/20k+jPczUDAEyvn69Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -596,7 +375,6 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/comp/otelcol/ddflareextension/README.md 
b/comp/otelcol/ddflareextension/README.md new file mode 100644 index 0000000000000..13f5902877c4e --- /dev/null +++ b/comp/otelcol/ddflareextension/README.md @@ -0,0 +1,72 @@ +# ddflare Extension + +The ddflare extension component allows inclusion of otel-agent data in the datadog-agent [flare](https://docs.datadoghq.com/agent/troubleshooting/send_a_flare/?tab=agent). A flare can be triggered by the core agent process or by remote-config. + +The ddflare extension also provides the relevant metadata for otel-agent configuration and inventory tracking in Fleet Automation. This metadata is periodically collected by the core-agent, which then submits it to the backend. + + + +## Extension Configuration + +The ddflare extension is added automatically by the [converter component](../converter/README.md). If you opted out of the converter, or you want to change the defaults, you can configure the extension as follows: + +*Collector config:* +``` +extensions: + datadog: + port: 7777 +``` + +*Agent config:* +``` +otel-agent: + enabled: true + flare_port: 7777 +``` + +The port is where the otel-agent exposes the data required to build the flare. The core agent then fetches the data from this port. + +## Data collected by flare + +### Configurations + +The flare will collect both the provided collector config and the enhanced config (enhanced via [converter](../converter/README.md)). + +The provided collector config can be found in `otel/otel-flare/customer.cfg`, and the enhanced config is stored alongside it in the `otel/otel-flare/` directory. + +### Environment variables + +The flare will collect all environment variables, and these can be found in `otel/otel-flare/environment.json`. + +### Extension data + +The flare also adds data collected from extensions. These extensions are added automatically by the [converter component](../converter/README.md). The data is collected from the following extensions: +- health_check: Found in `otel/otel-flare/health_check`. + +Contains a JSON document describing the latest health check, for example: + +``` +{"status":"Server available","upSince":"2024-08-14T14:54:00.575804+02:00","uptime":"28.470434291s"} +``` +- pprof: Found in `otel/otel-flare/pprof` + +Contains allocs (`dd-autoconfigured_debug_pprof_allocs`), heap (`dd-autoconfigured_debug_pprof_heap`) and CPU (`dd-autoconfigured_debug_pprof_profile`) profiles. Profiles can be opened with the [pprof tool](https://github.com/google/pprof), e.g. +``` +go tool pprof -http=: otel/otel-flare/pprof/dd-autoconfigured_debug_pprof_heap.dat +``` + +- zpages: Found in `otel/otel-flare/zpages` + +Contains extension (`dd-autoconfigured_debug_extensionz`), feature (`dd-autoconfigured_debug_featurez`), pipeline (`dd-autoconfigured_debug_pipelinez`), service (`dd-autoconfigured_debug_servicez`) and trace (`dd-autoconfigured_debug_tracez`) data. The data is in HTML format and can be opened in any HTML viewer. + +### Logs + +The flare will collect the otel-agent logs, which can be found in `logs/otel-agent.log`. + +### Raw data + +The raw response can be found in `otel-response.json`. This corresponds to the data exposed on the ddflare extension's port. + +## Data collected for inventory + +The ddflare extension submits a variety of metadata for Fleet Automation, including the version, command, and configuration. You can find more information about the Inventory Agent Payload in [comp/metadata/inventoryotel/README.md](../../metadata/inventoryotel/README.md).
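Reviewer note on the flare-port flow described in the README above: the sketch below shows one way a client could fetch the raw payload that the core agent collects from the extension and stores as `otel-response.json` in the flare. This is a minimal illustration, not the agent's actual fetch code; it assumes the default port 7777, a plain-HTTP endpoint at `/`, and bearer-token authentication as exercised by the new handler tests later in this PR. The `<agent-auth-token>` placeholder is hypothetical and must be replaced with the real agent auth token.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// Minimal sketch: query the ddflare extension the way the core agent would
// when assembling a flare. Port, scheme, and the token placeholder are
// assumptions for illustration only.
func main() {
	// Hypothetical placeholder; the real agent auth token is required,
	// otherwise the handler responds with 403 Forbidden (see the new
	// TestExtensionHTTPHandlerBadToken test in this PR).
	token := "<agent-auth-token>"

	req, err := http.NewRequest("GET", "http://localhost:7777/", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// The JSON body is what ends up as otel-response.json in the flare.
	fmt.Println(string(body))
}
```

An equivalent manual check, under the same assumptions, would be `curl -H "Authorization: Bearer $TOKEN" http://localhost:7777/`.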
diff --git a/comp/otelcol/ddflareextension/image/ddflareextensiondiagram.png b/comp/otelcol/ddflareextension/image/ddflareextensiondiagram.png new file mode 100644 index 0000000000000..02894b083267b Binary files /dev/null and b/comp/otelcol/ddflareextension/image/ddflareextensiondiagram.png differ diff --git a/comp/otelcol/ddflareextension/impl/config.go b/comp/otelcol/ddflareextension/impl/config.go index e5c7aa8a66aba..640ed5c2030af 100644 --- a/comp/otelcol/ddflareextension/impl/config.go +++ b/comp/otelcol/ddflareextension/impl/config.go @@ -10,10 +10,10 @@ import ( "errors" "fmt" - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/def" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/otelcol" ) type extractDebugEndpoint func(conf *confmap.Conf) (string, error) @@ -32,7 +32,8 @@ var ( type Config struct { HTTPConfig *confighttp.ServerConfig `mapstructure:",squash"` - ConfigStore configstore.Component + factories *otelcol.Factories + configProviderSettings otelcol.ConfigProviderSettings } var _ component.Config = (*Config)(nil) diff --git a/comp/otelcol/ddflareextension/impl/config_test.go b/comp/otelcol/ddflareextension/impl/config_test.go index b3c15eab8a784..6151e67518391 100644 --- a/comp/otelcol/ddflareextension/impl/config_test.go +++ b/comp/otelcol/ddflareextension/impl/config_test.go @@ -9,19 +9,13 @@ package ddflareextensionimpl import ( "testing" - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/confmap" ) -func getTestConfig(t *testing.T) *Config { - conv, err := configstore.NewConfigStore() - require.NoError(t, err) - +func getTestConfig() *Config { return &Config{ - ConfigStore: conv, HTTPConfig: &confighttp.ServerConfig{ Endpoint: "localhost:0", }, @@ -29,7 +23,7 @@ func getTestConfig(t *testing.T) *Config { } func TestValidate(t *testing.T) { - cfg := getTestConfig(t) + cfg := getTestConfig() err := cfg.Validate() assert.NoError(t, err) @@ -44,7 +38,7 @@ func TestValidate(t *testing.T) { } func TestUnmarshal(t *testing.T) { - cfg := getTestConfig(t) + cfg := getTestConfig() endpoint := "localhost:1234" diff --git a/comp/otelcol/ddflareextension/impl/configstore.go b/comp/otelcol/ddflareextension/impl/configstore.go new file mode 100644 index 0000000000000..2c96798795f22 --- /dev/null +++ b/comp/otelcol/ddflareextension/impl/configstore.go @@ -0,0 +1,93 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package ddflareextensionimpl defines the OpenTelemetry Extension implementation. +package ddflareextensionimpl + +import ( + "sync" + + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/otelcol" + "gopkg.in/yaml.v2" +) + +type configStore struct { + provided *otelcol.Config + enhanced *otelcol.Config + mu sync.RWMutex +} + +// setProvidedConf stores the config into configStoreImpl. +func (c *configStore) setProvidedConf(config *otelcol.Config) { + c.mu.Lock() + defer c.mu.Unlock() + + c.provided = config +} + +// setEnhancedConf stores the config into configStoreImpl. 
+func (c *configStore) setEnhancedConf(config *otelcol.Config) { + c.mu.Lock() + defer c.mu.Unlock() + + c.enhanced = config +} + +func confToString(conf *otelcol.Config) (string, error) { + cfg := confmap.New() + err := cfg.Marshal(conf) + if err != nil { + return "", err + } + bytesConf, err := yaml.Marshal(cfg.ToStringMap()) + if err != nil { + return "", err + } + + return string(bytesConf), nil +} + +// getProvidedConf returns a string representing the enhanced collector configuration. +func (c *configStore) getProvidedConf() (*confmap.Conf, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + conf := confmap.New() + err := conf.Marshal(c.provided) + if err != nil { + return nil, err + } + return conf, nil +} + +// getEnhancedConf returns a string representing the enhanced collector configuration. +func (c *configStore) getEnhancedConf() (*confmap.Conf, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + conf := confmap.New() + err := conf.Marshal(c.enhanced) + if err != nil { + return nil, err + } + return conf, nil +} + +// getProvidedConfAsString returns a string representing the enhanced collector configuration string. +func (c *configStore) getProvidedConfAsString() (string, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + return confToString(c.provided) +} + +// getEnhancedConfAsString returns a string representing the enhanced collector configuration string. +func (c *configStore) getEnhancedConfAsString() (string, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + return confToString(c.enhanced) +} diff --git a/comp/otelcol/ddflareextension/impl/configstore_test.go b/comp/otelcol/ddflareextension/impl/configstore_test.go new file mode 100644 index 0000000000000..b6d3c2e616681 --- /dev/null +++ b/comp/otelcol/ddflareextension/impl/configstore_test.go @@ -0,0 +1,197 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package ddflareextensionimpl defines the OpenTelemetry Extension implementation. +package ddflareextensionimpl + +import ( + "context" + "os" + "path/filepath" + "testing" + + converterimpl "github.com/DataDog/datadog-agent/comp/otelcol/converter/impl" + "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/datadogexporter" + "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor" + "github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector" + "go.opentelemetry.io/collector/component/componenttest" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/confmap/confmaptest" + "go.opentelemetry.io/collector/confmap/converter/expandconverter" + "go.opentelemetry.io/collector/confmap/provider/envprovider" + "go.opentelemetry.io/collector/confmap/provider/fileprovider" + "go.opentelemetry.io/collector/confmap/provider/httpprovider" + "go.opentelemetry.io/collector/confmap/provider/httpsprovider" + "go.opentelemetry.io/collector/confmap/provider/yamlprovider" + "go.opentelemetry.io/collector/otelcol" + "gopkg.in/yaml.v2" +) + +// this is only used for config unmarshalling. 
+func addFactories(factories otelcol.Factories) { + factories.Exporters[datadogexporter.Type] = datadogexporter.NewFactory(nil, nil, nil, nil, nil) + factories.Processors[infraattributesprocessor.Type] = infraattributesprocessor.NewFactory(nil, nil) + factories.Connectors[component.MustNewType("datadog")] = datadogconnector.NewFactory() + factories.Extensions[Type] = NewFactory(nil, otelcol.ConfigProviderSettings{}) +} + +func TestGetConfDump(t *testing.T) { + // get factories + factories, err := components() + assert.NoError(t, err) + addFactories(factories) + + // extension config + config := Config{ + HTTPConfig: &confighttp.ServerConfig{ + Endpoint: "localhost:0", + }, + factories: &factories, + configProviderSettings: newConfigProviderSettings(uriFromFile("simple-dd/config.yaml"), false), + } + extension, err := NewExtension(context.TODO(), &config, componenttest.NewNopTelemetrySettings(), component.BuildInfo{}) + assert.NoError(t, err) + + ext, ok := extension.(*ddExtension) + assert.True(t, ok) + + t.Run("provided-string", func(t *testing.T) { + actualString, _ := ext.configStore.getProvidedConfAsString() + actualStringMap, err := yamlBytesToMap([]byte(actualString)) + assert.NoError(t, err) + + expectedBytes, err := os.ReadFile(filepath.Join("testdata", "simple-dd", "config-provided-result.yaml")) + assert.NoError(t, err) + expectedMap, err := yamlBytesToMap(expectedBytes) + assert.NoError(t, err) + + assert.Equal(t, expectedMap, actualStringMap) + }) + + t.Run("provided-confmap", func(t *testing.T) { + actualConfmap, _ := ext.configStore.getProvidedConf() + // marshal to yaml and then to map to drop the types for comparison + bytesConf, err := yaml.Marshal(actualConfmap.ToStringMap()) + assert.NoError(t, err) + actualStringMap, err := yamlBytesToMap(bytesConf) + assert.NoError(t, err) + + expectedMap, err := confmaptest.LoadConf("testdata/simple-dd/config-provided-result.yaml") + assert.NoError(t, err) + // this step is required for type matching + expectedStringMapBytes, err := yaml.Marshal(expectedMap.ToStringMap()) + assert.NoError(t, err) + expectedStringMap, err := yamlBytesToMap(expectedStringMapBytes) + assert.NoError(t, err) + + assert.Equal(t, expectedStringMap, actualStringMap) + }) + + conf := confmapFromResolverSettings(t, newResolverSettings(uriFromFile("simple-dd/config.yaml"), true)) + err = ext.NotifyConfig(context.TODO(), conf) + assert.NoError(t, err) + + t.Run("enhanced-string", func(t *testing.T) { + actualString, _ := ext.configStore.getEnhancedConfAsString() + actualStringMap, err := yamlBytesToMap([]byte(actualString)) + assert.NoError(t, err) + + expectedBytes, err := os.ReadFile(filepath.Join("testdata", "simple-dd", "config-enhanced-result.yaml")) + assert.NoError(t, err) + expectedMap, err := yamlBytesToMap(expectedBytes) + assert.NoError(t, err) + + assert.Equal(t, expectedMap, actualStringMap) + }) + + t.Run("enhance-confmap", func(t *testing.T) { + actualConfmap, _ := ext.configStore.getEnhancedConf() + // marshal to yaml and then to map to drop the types for comparison + bytesConf, err := yaml.Marshal(actualConfmap.ToStringMap()) + assert.NoError(t, err) + actualStringMap, err := yamlBytesToMap(bytesConf) + assert.NoError(t, err) + + expectedMap, err := confmaptest.LoadConf("testdata/simple-dd/config-enhanced-result.yaml") + assert.NoError(t, err) + // this step is required for type matching + expectedStringMapBytes, err := yaml.Marshal(expectedMap.ToStringMap()) + assert.NoError(t, err) + expectedStringMap, err := 
yamlBytesToMap(expectedStringMapBytes) + assert.NoError(t, err) + + assert.Equal(t, expectedStringMap, actualStringMap) + }) +} + +func confmapFromResolverSettings(t *testing.T, resolverSettings confmap.ResolverSettings) *confmap.Conf { + resolver, err := confmap.NewResolver(resolverSettings) + assert.NoError(t, err) + conf, err := resolver.Resolve(context.TODO()) + assert.NoError(t, err) + return conf +} + +func uriFromFile(filename string) []string { + return []string{filepath.Join("testdata", filename)} +} + +func yamlBytesToMap(bytesConfig []byte) (map[string]any, error) { + var configMap = map[string]interface{}{} + err := yaml.Unmarshal(bytesConfig, configMap) + if err != nil { + return nil, err + } + return configMap, nil +} + +type converterFactory struct { + converter confmap.Converter +} + +func (c *converterFactory) Create(_ confmap.ConverterSettings) confmap.Converter { + return c.converter +} + +func newResolverSettings(uris []string, enhanced bool) confmap.ResolverSettings { + return confmap.ResolverSettings{ + URIs: uris, + ProviderFactories: []confmap.ProviderFactory{ + fileprovider.NewFactory(), + envprovider.NewFactory(), + yamlprovider.NewFactory(), + httpprovider.NewFactory(), + httpsprovider.NewFactory(), + }, + ConverterFactories: newConverterFactorie(enhanced), + } +} + +func newConverterFactorie(enhanced bool) []confmap.ConverterFactory { + converterFactories := []confmap.ConverterFactory{ + expandconverter.NewFactory(), + } + + converter, err := converterimpl.NewConverter(converterimpl.Requires{}) + if err != nil { + return []confmap.ConverterFactory{} + } + + if enhanced { + converterFactories = append(converterFactories, &converterFactory{converter: converter}) + } + + return converterFactories +} + +func newConfigProviderSettings(uris []string, enhanced bool) otelcol.ConfigProviderSettings { + return otelcol.ConfigProviderSettings{ + ResolverSettings: newResolverSettings(uris, enhanced), + } +} diff --git a/comp/otelcol/ddflareextension/impl/extension.go b/comp/otelcol/ddflareextension/impl/extension.go index fca2149b9544d..1ecd095adab4f 100644 --- a/comp/otelcol/ddflareextension/impl/extension.go +++ b/comp/otelcol/ddflareextension/impl/extension.go @@ -10,15 +10,17 @@ import ( "context" "encoding/json" "fmt" - "net" "net/http" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/otelcol" "go.uber.org/zap" extensionDef "github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def" "github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl/internal/metadata" + "github.com/DataDog/datadog-agent/pkg/version" ) // Type exports the internal metadata type for easy reference @@ -31,40 +33,41 @@ type ddExtension struct { cfg *Config // Extension configuration. telemetry component.TelemetrySettings - server *http.Server - tlsListener net.Listener + server *server info component.BuildInfo debug extensionDef.DebugSourceResponse + configStore *configStore } var _ extension.Extension = (*ddExtension)(nil) -// NewExtension creates a new instance of the extension. 
-func NewExtension(_ context.Context, cfg *Config, telemetry component.TelemetrySettings, info component.BuildInfo) (extensionDef.Component, error) { - ext := &ddExtension{ - cfg: cfg, - telemetry: telemetry, - info: info, - debug: extensionDef.DebugSourceResponse{ - Sources: map[string]extensionDef.OTelFlareSource{}, - }, - } +// NotifyConfig implements the ConfigWatcher interface, which allows this extension +// to be notified of the Collector's effective configuration. See interface: +// https://github.com/open-telemetry/opentelemetry-collector/blob/d0fde2f6b98f13cbbd8657f8188207ac7d230ed5/extension/extension.go#L46. +// This method is called during the startup process by the Collector's Service right after +// calling Start. +func (ext *ddExtension) NotifyConfig(_ context.Context, conf *confmap.Conf) error { + var cfg *configSettings var err error - ext.server, ext.tlsListener, err = buildHTTPServer(cfg.HTTPConfig.Endpoint, ext) - if err != nil { - return nil, err + + if cfg, err = unmarshal(conf, *ext.cfg.factories); err != nil { + return fmt.Errorf("cannot unmarshal the configuration: %w", err) } - return ext, nil -} -// Start is called when the extension is started. -func (ext *ddExtension) Start(_ context.Context, host component.Host) error { - ext.telemetry.Logger.Info("Starting DD Extension HTTP server", zap.String("url", ext.cfg.HTTPConfig.Endpoint)) + config := &otelcol.Config{ + Receivers: cfg.Receivers.Configs(), + Processors: cfg.Processors.Configs(), + Exporters: cfg.Exporters.Configs(), + Connectors: cfg.Connectors.Configs(), + Extensions: cfg.Extensions.Configs(), + Service: cfg.Service, + } + + ext.configStore.setEnhancedConf(config) // List configured Extensions - configstore := ext.cfg.ConfigStore - c, err := configstore.GetEnhancedConf() + c, err := ext.configStore.getEnhancedConf() if err != nil { return err } @@ -74,7 +77,7 @@ func (ext *ddExtension) Start(_ context.Context, host component.Host) error { return nil } - extensions := host.GetExtensions() + extensions := config.Extensions for extension := range extensions { extractor, ok := supportedDebugExtensions[extension.Type().String()] if !ok { @@ -120,8 +123,46 @@ func (ext *ddExtension) Start(_ context.Context, host component.Host) error { } } + return nil +} + +// NewExtension creates a new instance of the extension. +func NewExtension(_ context.Context, cfg *Config, telemetry component.TelemetrySettings, info component.BuildInfo) (extensionDef.Component, error) { + ocpProvided, err := otelcol.NewConfigProvider(cfg.configProviderSettings) + if err != nil { + return nil, fmt.Errorf("failed to create configprovider: %w", err) + } + + providedConf, err := ocpProvided.Get(context.Background(), *cfg.factories) + if err != nil { + return nil, err + } + + ext := &ddExtension{ + cfg: cfg, + telemetry: telemetry, + info: info, + configStore: &configStore{}, + debug: extensionDef.DebugSourceResponse{ + Sources: map[string]extensionDef.OTelFlareSource{}, + }, + } + + ext.configStore.setProvidedConf(providedConf) + + ext.server, err = newServer(cfg.HTTPConfig.Endpoint, ext) + if err != nil { + return nil, err + } + return ext, nil +} + +// Start is called when the extension is started. 
+func (ext *ddExtension) Start(_ context.Context, _ component.Host) error { + ext.telemetry.Logger.Info("Starting DD Extension HTTP server", zap.String("url", ext.cfg.HTTPConfig.Endpoint)) + go func() { - if err := ext.server.Serve(ext.tlsListener); err != nil && err != http.ErrServerClosed { + if err := ext.server.start(); err != nil && err != http.ErrServerClosed { ext.telemetry.ReportStatus(component.NewFatalErrorEvent(err)) ext.telemetry.Logger.Info("DD Extension HTTP could not start", zap.String("err", err.Error())) } @@ -136,18 +177,18 @@ func (ext *ddExtension) Shutdown(ctx context.Context) error { ext.telemetry.Logger.Info("Shutting down HTTP server") // Give the server a grace period to finish handling requests. - return ext.server.Shutdown(ctx) + return ext.server.shutdown(ctx) } // ServeHTTP the request handler for the extension. func (ext *ddExtension) ServeHTTP(w http.ResponseWriter, _ *http.Request) { - customer, err := ext.cfg.ConfigStore.GetProvidedConfAsString() + customer, err := ext.configStore.getProvidedConfAsString() if err != nil { w.WriteHeader(http.StatusInternalServerError) fmt.Fprintf(w, "Unable to get provided config\n") return } - enhanced, err := ext.cfg.ConfigStore.GetEnhancedConfAsString() + enhanced, err := ext.configStore.getEnhancedConfAsString() if err != nil { w.WriteHeader(http.StatusInternalServerError) fmt.Fprintf(w, "Unable to get enhanced config\n") @@ -162,7 +203,7 @@ func (ext *ddExtension) ServeHTTP(w http.ResponseWriter, _ *http.Request) { resp := extensionDef.Response{ BuildInfoResponse: extensionDef.BuildInfoResponse{ - AgentVersion: ext.info.Version, + AgentVersion: version.AgentVersion, AgentCommand: ext.info.Command, AgentDesc: ext.info.Description, ExtensionVersion: ext.info.Version, diff --git a/comp/otelcol/ddflareextension/impl/extension_test.go b/comp/otelcol/ddflareextension/impl/extension_test.go index f1bd7dd398407..5690a829a96d2 100644 --- a/comp/otelcol/ddflareextension/impl/extension_test.go +++ b/comp/otelcol/ddflareextension/impl/extension_test.go @@ -11,107 +11,49 @@ import ( "encoding/json" "net/http" "net/http/httptest" - "path/filepath" + "strings" "testing" ddflareextension "github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def" + apiutil "github.com/DataDog/datadog-agent/pkg/api/util" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + + "github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector" + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension" + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl" - spanmetricsconnector "github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector" - healthcheckextension "github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension" - pprofextension "github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension" - transformprocessor "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" "go.opentelemetry.io/collector/component" 
"go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/confighttp" - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/provider/fileprovider" - "go.opentelemetry.io/collector/confmap/provider/yamlprovider" "go.opentelemetry.io/collector/connector" "go.opentelemetry.io/collector/exporter" - otlpexporter "go.opentelemetry.io/collector/exporter/otlpexporter" - otlphttpexporter "go.opentelemetry.io/collector/exporter/otlphttpexporter" + "go.opentelemetry.io/collector/exporter/otlpexporter" + "go.opentelemetry.io/collector/exporter/otlphttpexporter" "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/extension/zpagesextension" "go.opentelemetry.io/collector/otelcol" "go.opentelemetry.io/collector/processor" - batchprocessor "go.opentelemetry.io/collector/processor/batchprocessor" + "go.opentelemetry.io/collector/processor/batchprocessor" "go.opentelemetry.io/collector/receiver" - nopreceiver "go.opentelemetry.io/collector/receiver/nopreceiver" - otlpreceiver "go.opentelemetry.io/collector/receiver/otlpreceiver" + "go.opentelemetry.io/collector/receiver/nopreceiver" + "go.opentelemetry.io/collector/receiver/otlpreceiver" "go.uber.org/zap" ) -var cpSettings = otelcol.ConfigProviderSettings{ - ResolverSettings: confmap.ResolverSettings{ - URIs: []string{filepath.Join("testdata", "config.yaml")}, - ProviderFactories: []confmap.ProviderFactory{ - fileprovider.NewFactory(), - yamlprovider.NewFactory(), - }, - }, -} - -func components() (otelcol.Factories, error) { - var err error - factories := otelcol.Factories{} - - factories.Extensions, err = extension.MakeFactoryMap( - healthcheckextension.NewFactory(), - pprofextension.NewFactory(), - ) - if err != nil { - return otelcol.Factories{}, err - } - - factories.Receivers, err = receiver.MakeFactoryMap( - nopreceiver.NewFactory(), - otlpreceiver.NewFactory(), - ) - if err != nil { - return otelcol.Factories{}, err - } - - factories.Exporters, err = exporter.MakeFactoryMap( - otlpexporter.NewFactory(), - otlphttpexporter.NewFactory(), - ) - if err != nil { - return otelcol.Factories{}, err - } - - factories.Processors, err = processor.MakeFactoryMap( - batchprocessor.NewFactory(), - transformprocessor.NewFactory(), - ) - if err != nil { - return otelcol.Factories{}, err - } - - factories.Connectors, err = connector.MakeFactoryMap( - spanmetricsconnector.NewFactory(), - ) - if err != nil { - return otelcol.Factories{}, err - } - - return factories, nil -} - func getExtensionTestConfig(t *testing.T) *Config { - cf, err := configstore.NewConfigStore() - assert.NoError(t, err) - factories, err := components() assert.NoError(t, err) - - cf.AddConfigs(cpSettings, cpSettings, factories) return &Config{ HTTPConfig: &confighttp.ServerConfig{ Endpoint: "localhost:0", }, - ConfigStore: cf, + configProviderSettings: newConfigProviderSettings(uriFromFile("config.yaml"), false), + factories: &factories, } } @@ -124,22 +66,21 @@ func getTestExtension(t *testing.T) (ddflareextension.Component, error) { return NewExtension(c, cfg, telemetry, info) } -func TestNewExtension(t *testing.T) { - ext, err := getTestExtension(t) - assert.NoError(t, err) - assert.NotNil(t, ext) +func getResponseToHandlerRequest(t *testing.T, tokenOverride string) *httptest.ResponseRecorder { - _, ok := ext.(*ddExtension) - assert.True(t, ok) -} - -func TestExtensionHTTPHandler(t *testing.T) { // Create a request req, err := http.NewRequest("GET", "/", nil) if err != nil { t.Fatal(err) } + token := 
apiutil.GetAuthToken() + if tokenOverride != "" { + token = tokenOverride + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+token) + // Create a ResponseRecorder rr := httptest.NewRecorder() @@ -158,8 +99,41 @@ func TestExtensionHTTPHandler(t *testing.T) { ddExt.Start(context.TODO(), host) + conf := confmapFromResolverSettings(t, newResolverSettings(uriFromFile("config.yaml"), false)) + ddExt.NotifyConfig(context.TODO(), conf) + assert.NoError(t, err) + + handler := ddExt.server.srv.Handler + // Call the handler's ServeHTTP method - ddExt.ServeHTTP(rr, req) + handler.ServeHTTP(rr, req) + + return rr +} + +func TestNewExtension(t *testing.T) { + ext, err := getTestExtension(t) + assert.NoError(t, err) + assert.NotNil(t, ext) + + _, ok := ext.(*ddExtension) + assert.True(t, ok) +} + +func TestExtensionHTTPHandler(t *testing.T) { + oldConfig := pkgconfigsetup.Datadog() + defer func() { + pkgconfigsetup.SetDatadog(oldConfig) + }() + + conf := pkgconfigmodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + pkgconfigsetup.SetDatadog(conf) + err := apiutil.CreateAndSetAuthToken(conf) + if err != nil { + t.Fatal(err) + } + + rr := getResponseToHandlerRequest(t, "") // Check the response status code assert.Equalf(t, http.StatusOK, rr.Code, @@ -187,6 +161,27 @@ func TestExtensionHTTPHandler(t *testing.T) { } } +func TestExtensionHTTPHandlerBadToken(t *testing.T) { + oldConfig := pkgconfigsetup.Datadog() + defer func() { + pkgconfigsetup.SetDatadog(oldConfig) + }() + + conf := pkgconfigmodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + pkgconfigsetup.SetDatadog(conf) + err := apiutil.CreateAndSetAuthToken(conf) + if err != nil { + t.Fatal(err) + } + + rr := getResponseToHandlerRequest(t, "badtoken") + + // Check the response status code + assert.Equalf(t, http.StatusForbidden, rr.Code, + "handler returned wrong status code: got %v want %v", rr.Code, http.StatusForbidden) + +} + type hostWithExtensions struct { component.Host exts map[component.ID]component.Component @@ -202,3 +197,51 @@ func newHostWithExtensions(exts map[component.ID]component.Component) component. 
func (h *hostWithExtensions) GetExtensions() map[component.ID]component.Component { return h.exts } + +func components() (otelcol.Factories, error) { + var err error + factories := otelcol.Factories{} + + factories.Extensions, err = extension.MakeFactoryMap( + healthcheckextension.NewFactory(), + pprofextension.NewFactory(), + zpagesextension.NewFactory(), + ) + if err != nil { + return otelcol.Factories{}, err + } + + factories.Receivers, err = receiver.MakeFactoryMap( + nopreceiver.NewFactory(), + otlpreceiver.NewFactory(), + prometheusreceiver.NewFactory(), + ) + if err != nil { + return otelcol.Factories{}, err + } + + factories.Exporters, err = exporter.MakeFactoryMap( + otlpexporter.NewFactory(), + otlphttpexporter.NewFactory(), + ) + if err != nil { + return otelcol.Factories{}, err + } + + factories.Processors, err = processor.MakeFactoryMap( + batchprocessor.NewFactory(), + transformprocessor.NewFactory(), + ) + if err != nil { + return otelcol.Factories{}, err + } + + factories.Connectors, err = connector.MakeFactoryMap( + spanmetricsconnector.NewFactory(), + ) + if err != nil { + return otelcol.Factories{}, err + } + + return factories, nil +} diff --git a/comp/otelcol/ddflareextension/impl/factory.go b/comp/otelcol/ddflareextension/impl/factory.go index eb6967ebaed44..64c52aa9c966c 100644 --- a/comp/otelcol/ddflareextension/impl/factory.go +++ b/comp/otelcol/ddflareextension/impl/factory.go @@ -10,11 +10,11 @@ import ( "context" "fmt" - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/def" "github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl/internal/metadata" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/otelcol" ) const ( @@ -24,20 +24,22 @@ const ( type ddExtensionFactory struct { extension.Factory - configstore configstore.Component + factories *otelcol.Factories + configProviderSettings otelcol.ConfigProviderSettings } // NewFactory creates a factory for HealthCheck extension. 
-func NewFactory(configstore configstore.Component) extension.Factory { +func NewFactory(factories *otelcol.Factories, configProviderSettings otelcol.ConfigProviderSettings) extension.Factory { return &ddExtensionFactory{ - configstore: configstore, + factories: factories, + configProviderSettings: configProviderSettings, } } func (f *ddExtensionFactory) CreateExtension(ctx context.Context, set extension.Settings, cfg component.Config) (extension.Extension, error) { - config := &Config{ - ConfigStore: f.configstore, + factories: f.factories, + configProviderSettings: f.configProviderSettings, } config.HTTPConfig = cfg.(*Config).HTTPConfig return NewExtension(ctx, config, set.TelemetrySettings, set.BuildInfo) @@ -48,7 +50,6 @@ func (f *ddExtensionFactory) CreateDefaultConfig() component.Config { HTTPConfig: &confighttp.ServerConfig{ Endpoint: fmt.Sprintf("localhost:%d", defaultHTTPPort), }, - ConfigStore: f.configstore, } } diff --git a/comp/otelcol/ddflareextension/impl/factory_test.go b/comp/otelcol/ddflareextension/impl/factory_test.go index 16ae62a1cd8c5..5a38c3513c0cb 100644 --- a/comp/otelcol/ddflareextension/impl/factory_test.go +++ b/comp/otelcol/ddflareextension/impl/factory_test.go @@ -10,18 +10,16 @@ import ( "context" "testing" - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl" "github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl/internal/metadata" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/extension" ) func getTestFactory(t *testing.T) extension.Factory { - conv, err := configstore.NewConfigStore() - require.NoError(t, err) + factories, err := components() + assert.NoError(t, err) - return NewFactory(conv) + return NewFactory(&factories, newConfigProviderSettings(uriFromFile("config.yaml"), false)) } func TestNewFactory(t *testing.T) { diff --git a/comp/otelcol/ddflareextension/impl/go.mod b/comp/otelcol/ddflareextension/impl/go.mod index eba0f9a9e9570..76390a61a90af 100644 --- a/comp/otelcol/ddflareextension/impl/go.mod +++ b/comp/otelcol/ddflareextension/impl/go.mod @@ -3,30 +3,51 @@ module github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl go 1.22.0 replace ( + github.com/DataDog/datadog-agent/cmd/agent/common/path => ../../../../cmd/agent/common/path + github.com/DataDog/datadog-agent/comp/api/api/def => ../../../api/api/def + github.com/DataDog/datadog-agent/comp/api/authtoken => ../../../api/authtoken github.com/DataDog/datadog-agent/comp/core/config => ../../../core/config github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../../core/flare/builder github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../core/flare/types github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface => ../../../core/hostname/hostnameinterface github.com/DataDog/datadog-agent/comp/core/log/def => ../../../core/log/def + github.com/DataDog/datadog-agent/comp/core/log/mock => ../../../core/log/mock github.com/DataDog/datadog-agent/comp/core/secrets => ../../../core/secrets + github.com/DataDog/datadog-agent/comp/core/status => ../../../core/status + github.com/DataDog/datadog-agent/comp/core/tagger/types => ../../../core/tagger/types + github.com/DataDog/datadog-agent/comp/core/tagger/utils => ../../../core/tagger/utils github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../core/telemetry github.com/DataDog/datadog-agent/comp/def => ../../../def + github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder => 
../../../forwarder/defaultforwarder + github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface => ../../../forwarder/orchestrator/orchestratorinterface github.com/DataDog/datadog-agent/comp/logs/agent/config => ../../../logs/agent/config - github.com/DataDog/datadog-agent/comp/otelcol/configstore/def => ../../configstore/def - github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl => ../../configstore/impl + github.com/DataDog/datadog-agent/comp/otelcol/converter/def => ../../../otelcol/converter/def + github.com/DataDog/datadog-agent/comp/otelcol/converter/impl => ../../converter/impl github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def => ../../ddflareextension/def github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline => ../../logsagentpipeline github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl => ../../logsagentpipeline/logsagentpipelineimpl + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/datadogexporter => ../../otlp/components/exporter/datadogexporter github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter => ../../otlp/components/exporter/logsagentexporter + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter => ../../otlp/components/exporter/serializerexporter github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient => ../../otlp/components/metricsclient + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor => ../../otlp/components/processor/infraattributesprocessor github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor => ../../otlp/components/statsprocessor + github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil => ../../../otelcol/otlp/testutil + github.com/DataDog/datadog-agent/comp/serializer/compression => ../../../serializer/compression + github.com/DataDog/datadog-agent/comp/trace/agent/def => ../../../trace/agent/def github.com/DataDog/datadog-agent/comp/trace/compression/def => ../../../trace/compression/def github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip => ../../../trace/compression/impl-gzip github.com/DataDog/datadog-agent/comp/trace/compression/impl-zstd => ../../../trace/compression/impl-zstd + github.com/DataDog/datadog-agent/pkg/aggregator/ckey => ../../../../pkg/aggregator/ckey + github.com/DataDog/datadog-agent/pkg/api => ../../../../pkg/api github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../../pkg/collector/check/defaults github.com/DataDog/datadog-agent/pkg/config/env => ../../../../pkg/config/env + github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../../pkg/config/structure + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/logs/auditor => ../../../../pkg/logs/auditor github.com/DataDog/datadog-agent/pkg/logs/client => ../../../../pkg/logs/client @@ -40,116 +61,361 @@ replace ( github.com/DataDog/datadog-agent/pkg/logs/sources => 
../../../../pkg/logs/sources github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface => ../../../../pkg/logs/status/statusinterface github.com/DataDog/datadog-agent/pkg/logs/status/utils => ../../../../pkg/logs/status/utils + github.com/DataDog/datadog-agent/pkg/logs/util/testutils => ../../../../pkg/logs/util/testutils + github.com/DataDog/datadog-agent/pkg/metrics => ../../../../pkg/metrics github.com/DataDog/datadog-agent/pkg/obfuscate => ../../../../pkg/obfuscate + github.com/DataDog/datadog-agent/pkg/orchestrator/model => ../../../../pkg/orchestrator/model + github.com/DataDog/datadog-agent/pkg/process/util/api => ../../../../pkg/process/util/api github.com/DataDog/datadog-agent/pkg/proto => ../../../../pkg/proto github.com/DataDog/datadog-agent/pkg/remoteconfig/state => ../../../../pkg/remoteconfig/state + github.com/DataDog/datadog-agent/pkg/serializer => ../../../../pkg/serializer github.com/DataDog/datadog-agent/pkg/status/health => ../../../../pkg/status/health + github.com/DataDog/datadog-agent/pkg/tagger/types => ../../../../pkg/tagger/types + github.com/DataDog/datadog-agent/pkg/tagset => ../../../../pkg/tagset github.com/DataDog/datadog-agent/pkg/telemetry => ../../../../pkg/telemetry github.com/DataDog/datadog-agent/pkg/trace => ../../../../pkg/trace github.com/DataDog/datadog-agent/pkg/util/backoff => ../../../../pkg/util/backoff + github.com/DataDog/datadog-agent/pkg/util/buf => ../../../../pkg/util/buf github.com/DataDog/datadog-agent/pkg/util/cgroups => ../../../../pkg/util/cgroups + github.com/DataDog/datadog-agent/pkg/util/common => ../../../../pkg/util/common + github.com/DataDog/datadog-agent/pkg/util/containers/image => ../../../../pkg/util/containers/image github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../pkg/util/executable github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../../pkg/util/filesystem github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/http => ../../../../pkg/util/http + github.com/DataDog/datadog-agent/pkg/util/json => ../../../../pkg/util/json github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log + github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../../pkg/util/log/setup github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber + github.com/DataDog/datadog-agent/pkg/util/sort => ../../../../pkg/util/sort github.com/DataDog/datadog-agent/pkg/util/startstop => ../../../../pkg/util/startstop github.com/DataDog/datadog-agent/pkg/util/statstracker => ../../../../pkg/util/statstracker github.com/DataDog/datadog-agent/pkg/util/system => ../../../../pkg/util/system github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../../pkg/util/system/socket + github.com/DataDog/datadog-agent/pkg/util/tagger => ../../../../pkg/util/tagger + github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../../pkg/util/testutil github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../pkg/util/winutil github.com/DataDog/datadog-agent/pkg/version => ../../../../pkg/version + github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20180202092358-40e2722dffea + 
github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector => github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0 + github.com/prometheus/prometheus => github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e ) require ( - github.com/DataDog/datadog-agent/comp/otelcol/configstore/def v0.56.0-rc.3 - github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl v0.56.0-rc.3 + github.com/DataDog/datadog-agent/comp/otelcol/converter/impl v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/datadogexporter v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/api v0.56.2 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 + github.com/DataDog/datadog-agent/pkg/version v0.57.0 + github.com/gorilla/mux v1.8.1 + github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.104.0 github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.104.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.104.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.104.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.104.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.104.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.104.0 go.opentelemetry.io/collector/config/confighttp v0.104.0 go.opentelemetry.io/collector/confmap v0.104.0 + go.opentelemetry.io/collector/confmap/converter/expandconverter v0.104.0 + go.opentelemetry.io/collector/confmap/provider/envprovider v0.104.0 go.opentelemetry.io/collector/confmap/provider/fileprovider v0.104.0 + go.opentelemetry.io/collector/confmap/provider/httpprovider v0.104.0 + go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.104.0 go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.104.0 go.opentelemetry.io/collector/connector v0.104.0 go.opentelemetry.io/collector/exporter v0.104.0 go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 go.opentelemetry.io/collector/extension v0.104.0 + go.opentelemetry.io/collector/extension/zpagesextension v0.104.0 go.opentelemetry.io/collector/otelcol v0.104.0 go.opentelemetry.io/collector/processor v0.104.0 go.opentelemetry.io/collector/processor/batchprocessor v0.104.0 go.opentelemetry.io/collector/receiver v0.104.0 go.opentelemetry.io/collector/receiver/nopreceiver v0.104.0 go.opentelemetry.io/collector/receiver/otlpreceiver v0.104.0 + go.opentelemetry.io/collector/service v0.104.0 go.uber.org/zap v1.27.0 + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 + gopkg.in/yaml.v2 v2.4.0 ) require ( + cloud.google.com/go/compute/metadata v0.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 // 
indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/Code-Hex/go-generics-cache v1.3.1 // indirect + github.com/DataDog/agent-payload/v5 v5.0.123 // indirect + github.com/DataDog/datadog-agent/comp/core/config v0.56.2 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.2 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.2 // indirect + github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/log/def v0.58.0-devel // indirect + github.com/DataDog/datadog-agent/comp/core/log/mock v0.58.0-devel // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect + github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/types v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.56.2 // indirect + github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/def v0.56.2 // indirect + github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/otelcol/converter/def v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor v0.0.0-20240525065430-d0b647bcb646 // indirect + github.com/DataDog/datadog-agent/comp/serializer/compression v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/trace/agent/def v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/trace/compression/def v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/aggregator/ckey v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/utils v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.56.0-rc.3 // indirect + 
github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/pipeline v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/processor v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/sds v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/sender v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/metrics v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/obfuscate v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/orchestrator/model v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/process/util/api v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/serializer v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/tagger/types v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/tagset v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/cgroups v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log/setup v0.58.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/tagger v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect + github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect + github.com/DataDog/datadog-go/v5 v5.5.0 // indirect + github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect + github.com/DataDog/go-sqllexer v0.0.15 // indirect + 
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect + github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 // indirect + github.com/DataDog/sketches-go v1.4.6 // indirect + github.com/DataDog/viper v1.13.5 // indirect + github.com/DataDog/zstd v1.5.5 // indirect + github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.0 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/alecthomas/participle/v2 v2.1.1 // indirect + github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/aws/aws-sdk-go v1.53.11 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/briandowns/spinner v1.23.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect + github.com/containerd/cgroups/v3 v3.0.3 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dennwc/varint v1.0.0 // indirect + github.com/digitalocean/godo v1.109.0 // indirect + github.com/distribution/reference v0.5.0 // indirect + github.com/docker/docker v25.0.6+incompatible // indirect + github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/envoyproxy/go-control-plane v0.12.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect + github.com/fatih/color v1.17.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.20.4 // indirect + github.com/go-openapi/swag v0.22.9 // indirect + github.com/go-resty/resty/v2 v2.12.0 // indirect + github.com/go-viper/mapstructure/v2 v2.1.0 // indirect + github.com/go-zookeeper/zk v1.0.3 // indirect github.com/gobwas/glob v0.2.3 // indirect + github.com/goccy/go-json v0.10.3 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/snappy v0.0.4 // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + 
github.com/google/go-querystring v1.1.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/s2a-go v0.1.7 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.2 // indirect + github.com/gophercloud/gophercloud v1.8.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/hashicorp/consul/api v1.29.1 // indirect + github.com/hashicorp/cronexpr v1.1.2 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.4 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/hcl v1.0.1-vault-5 // indirect + github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 // indirect + github.com/hashicorp/serf v0.10.1 // indirect + github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect + github.com/hetznercloud/hcloud-go/v2 v2.6.0 // indirect github.com/iancoleman/strcase v0.3.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/ionos-cloud/sdk-go/v6 v6.1.11 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/karrick/godirwalk v1.17.0 // indirect github.com/klauspost/compress v1.17.9 // indirect github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/knadh/koanf/v2 v2.1.1 // indirect + github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/lightstep/go-expohisto v1.0.0 // indirect + github.com/linode/linodego v1.33.0 // indirect github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/miekg/dns v1.1.58 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/mostynb/go-grpc-compression v1.2.3 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.104.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.104.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.104.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.104.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.104.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.104.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.104.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.104.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.104.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.104.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.104.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.104.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/opencontainers/runtime-spec v1.2.0 // indirect + github.com/outcaste-io/ristretto v0.2.3 // indirect + github.com/ovh/go-ovh v1.4.3 // indirect + github.com/patrickmn/go-cache v2.1.0+incompatible // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/philhofer/fwd v1.1.2 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.20.2 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.54.0 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e // indirect + github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 // indirect github.com/rs/cors v1.11.0 // indirect + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect + github.com/shirou/gopsutil/v3 v3.24.5 // indirect github.com/shirou/gopsutil/v4 v4.24.5 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/shoenig/test v1.7.1 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.7.0 // indirect github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/stormcat24/protodep v0.1.8 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/tilinna/clock v1.1.0 // indirect + github.com/tinylib/msgp v1.1.9 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect + github.com/twmb/murmur3 v1.1.8 // indirect + github.com/vultr/govultr/v2 v2.17.2 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector 
v0.104.0 // indirect @@ -163,16 +429,18 @@ require ( go.opentelemetry.io/collector/config/configtls v0.104.0 // indirect go.opentelemetry.io/collector/config/internal v0.104.0 // indirect go.opentelemetry.io/collector/consumer v0.104.0 // indirect + go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 // indirect go.opentelemetry.io/collector/extension/auth v0.104.0 // indirect go.opentelemetry.io/collector/featuregate v1.11.0 // indirect + go.opentelemetry.io/collector/otelcol/otelcoltest v0.104.0 // indirect go.opentelemetry.io/collector/pdata v1.11.0 // indirect go.opentelemetry.io/collector/semconv v0.104.0 // indirect - go.opentelemetry.io/collector/service v0.104.0 // indirect go.opentelemetry.io/contrib/config v0.7.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect - go.opentelemetry.io/otel v1.27.0 // indirect + go.opentelemetry.io/contrib/zpages v0.52.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect go.opentelemetry.io/otel/bridge/opencensus v1.27.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 // indirect @@ -182,21 +450,42 @@ require ( go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect - go.opentelemetry.io/otel/sdk v1.27.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect - go.opentelemetry.io/otel/trace v1.27.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.2.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/dig v1.18.0 // indirect + go.uber.org/fx v1.22.2 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect + golang.org/x/crypto v0.27.0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/time v0.6.0 // indirect + golang.org/x/tools v0.25.0 // indirect gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect - google.golang.org/grpc v1.64.0 // indirect + google.golang.org/api v0.169.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/grpc v1.65.0 // indirect google.golang.org/protobuf v1.34.2 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + gotest.tools/v3 v3.2.0 // 
indirect + k8s.io/api v0.29.3 // indirect + k8s.io/apimachinery v0.29.3 // indirect + k8s.io/client-go v0.29.3 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/comp/otelcol/ddflareextension/impl/go.sum b/comp/otelcol/ddflareextension/impl/go.sum index 8abd0addada8f..b52bd4c77197d 100644 --- a/comp/otelcol/ddflareextension/impl/go.sum +++ b/comp/otelcol/ddflareextension/impl/go.sum @@ -1,33 +1,239 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= 
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 h1:MxA59PGoCFb+vCwRQi3PhQEwHj4+r2dhuv9HG+vM7iM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Code-Hex/go-generics-cache v1.3.1 h1:i8rLwyhoyhaerr7JpjtYjJZUcCbWOdiYO3fZXLiEC4g= +github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= +github.com/DataDog/agent-payload/v5 v5.0.123 h1:fc/mME+zXBPo8i8690rVJXeqlZ1o+8ixIzNu43XP+o8= +github.com/DataDog/agent-payload/v5 v5.0.123/go.mod h1:FgVQKmVdqdmZTbxIptqJC/l+xEzdiXsaAOs/vGAvWzs= +github.com/DataDog/datadog-agent/comp/core/log v0.54.0 h1:wP3bJua8qmURqLXkmYxrbELMJQ2oO1MuVNfxHJT4wiQ= +github.com/DataDog/datadog-agent/comp/core/log v0.54.0/go.mod h1:mtMxZiwg13b4bHgDf8xE6FHgTcadzI5Cc0lx2MSY1mE= +github.com/DataDog/datadog-api-client-go/v2 v2.26.0 h1:bZr0hu+hx8L91+yU5EGw8wK3FlCVEIashpx+cylWsf0= +github.com/DataDog/datadog-api-client-go/v2 v2.26.0/go.mod h1:QKOu6vscsh87fMY1lHfLEmNSunyXImj8BUaUWJXOehc= +github.com/DataDog/datadog-go 
v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= +github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= +github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= +github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42/go.mod h1:TX7CTOQ3LbQjfAi4SwqUoR5gY1zfUk7VRBDTuArjaDc= +github.com/DataDog/go-sqllexer v0.0.15 h1:rUUu52dP8EQhJLnUw0MIAxZp0BQx2fOTuMztr3vtHUU= +github.com/DataDog/go-sqllexer v0.0.15/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= +github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= +github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee h1:tXibLZk3G6HncIFJKaNItsdzcrk4YqILNDZlXPTNt4k= +github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee/go.mod h1:nTot/Iy0kW16bXgXr6blEc8gFeAS7vTqYlhAxh+dbc0= +github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= +github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 h1:jdsuH8u4rxfvy3ZHoSLk5NAZrQMNZqyJwhM15FpEswE= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0/go.mod h1:KI5I5JhJNOQWeE4vs+qk+BY/9PVSDwNmSjrCUrmuZKw= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0 h1:e4XT2+v4vgZBCbp5JUbe0Z+PRegh+nsLMp4X+esht9E= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 h1:JLpKc1QpkaUXEFgN68/Q9XgF0XgbVl/IXd8S1KUcEV4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0/go.mod h1:VJtgUHCz38obs58oEjNjEre6IaHmR+s7o4DvX74knq4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 h1:b60rxWT/EwcSA4l/zXfqTZp3udXJ1fKtz7+Qwat8OjQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0/go.mod h1:6jM34grB+zhMrzWgM0V8B6vyIJ/75oAfjcx/mJWv6cE= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 h1:0OFAPO964qsj6BzKs/hbAkpO/IIHp7vN1klKrohzULA= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0/go.mod h1:IDaKpBfDtw8eWBLtXR14HB5dsXDxS4VRUR0OL5rlRT8= +github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= +github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= +github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= +github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= +github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= +github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f/go.mod 
h1:oXfOhM/Kr8OvqS6tVqJwxPBornV0yrx3bc+l0BDr7PQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.0 h1:N4xzkSD2BkRwEZSPf3C2eUZxjS5trpo4gOwRh8mu+BA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.0/go.mod h1:p2puVVSKjQ84Qb1gzw2XHLs34WQyHTYFZLaVxypAFYs= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0= github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= github.com/alecthomas/participle/v2 v2.1.1 h1:hrjKESvSqGHzRb4yW1ciisFJ4p3MGYih6icjJvbsmV8= github.com/alecthomas/participle/v2 v2.1.1/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.53.11 h1:KcmduYvX15rRqt4ZU/7jKkmDxU/G87LJ9MUI0yQJh00= +github.com/aws/aws-sdk-go v1.53.11/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod 
h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= +github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= +github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod 
h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180202092358-40e2722dffea/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= +github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/digitalocean/godo v1.109.0 h1:4W97RJLJSUQ3veRZDNbp1Ol3Rbn6Lmt9bKGvfqYI5SU= +github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/docker v25.0.6+incompatible h1:5cPwbwriIcsua2REJe8HqQV+6WlWc1byg2QSXzBxBGg= +github.com/docker/docker v25.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= +github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -36,57 +242,238 @@ github.com/go-logr/stdr v1.2.2/go.mod 
h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= +github.com/go-resty/resty/v2 v2.12.0 h1:rsVL8P90LFvkUYq/V5BTVe203WfRIU4gvcf+yfzJzGA= +github.com/go-resty/resty/v2 v2.12.0/go.mod h1:o0yGPrkS3lOe1+eFajk6kBW8ScXzwU3hD69/gt2yB/0= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w= +github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= +github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= 
+github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q= +github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 
h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= +github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk= +github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc= +github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI= +github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg= +github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= +github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= +github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= +github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= +github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= 
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= +github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-vault-5 
h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= +github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= +github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 h1:fI1LXuBaS1d9z1kmb++Og6YD8uMRwadXorCwE+xgOFA= +github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702/go.mod h1:z71gkJdrkAt/Rl6C7Q79VE7AwJ5lUF+M+fzFTyIHYB0= +github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= +github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/hetznercloud/hcloud-go/v2 v2.6.0 h1:RJOA2hHZ7rD1pScA4O1NF6qhkHyUdbbxjHgFNot8928= +github.com/hetznercloud/hcloud-go/v2 v2.6.0/go.mod h1:4J1cSE57+g0WS93IiHLV7ubTHItcp+awzeBp5bM9mfA= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= +github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= +github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI= +github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= @@ -97,37 +484,118 @@ github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPgh github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/go-expohisto v1.0.0 h1:UPtTS1rGdtehbbAF7o/dhkWLTDI73UifG8LbfQI7cA4= 
github.com/lightstep/go-expohisto v1.0.0/go.mod h1:xDXD0++Mu2FOaItXtdDfksfgxfV0z1TMPa+e/EUd0cs= +github.com/linode/linodego v1.33.0 h1:cX2FYry7r6CA1ujBMsdqiM4VhvIQtnWsOuVblzfBhCw= +github.com/linode/linodego v1.33.0/go.mod h1:dSJJgIwqZCF5wnpuC6w5cyIbRtcexAm7uVvuJopGB40= github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= +github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= +github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface 
v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= +github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mostynb/go-grpc-compression v1.2.3 h1:42/BKWMy0KEJGSdWvzqIyOZ95YcR9mLPqKctH7Uo//I= github.com/mostynb/go-grpc-compression v1.2.3/go.mod h1:AghIxF3P57umzqM9yz795+y1Vjs47Km/Y2FE6ouQ7Lg= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= 
+github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0 h1:Kpfqjwp+nlgqacXkSS8T8iGiTMTFo8NoT8AoRomDOpU= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0/go.mod h1:ymbGC/jEXTq8mgHsxzV1PjVGHmV5hSQXmkYkFfGfuLw= github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.104.0 h1:6dvpPt8pCcV+TfMnnanFk2NQYf9HN1voSS9iIHdW+L8= github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.104.0/go.mod h1:MfSM6mt9qH3vHCaj2rlX6IY/7fN+zCLzNJC25XG9rNU= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.103.0 h1:2XWbSIoIKQyFvn97pS4uc0Pxwe7EWCmZEg2r/+kiL58= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.103.0/go.mod h1:WrnJQRKaivYllAC2B1KeCI5uYiYsZv3Hcbd6iQfr9Jg= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.104.0 h1:lkf7Bof0rbPy2/0+tu+FAgEzwVKmJKcMlx8xsR26TdA= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.104.0/go.mod h1:B6dQmrNwW1q7rOadf57fwIaZHYzwrovTSSEEaiFyf0w= github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.104.0 h1:SveJtKEP2pXyCbucjrDzbBGQUUgrU+vBMTyUgy0tplc= github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.104.0/go.mod h1:HdVNjnRruSyRiqXvPBy/ZVumw7zjegmoJmFRgtBnaQU= github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.104.0 h1:dcs3PHXBShL5+DWmDrNXnESlehQjRjIaVE84GPyZL5E= github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.104.0/go.mod h1:Vh707OU/o72qqlDGS+8WVkMCTIlmiTfy3k6PQeq/tgY= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.104.0 h1:KmZvS+RN2w4zxMuX1yiobjkN8fvwBUJ+vl5LkO3O7bk= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.104.0/go.mod h1:fc7PiNmgpw+RlWzdWcuoiH9mIlDgiryy70ZjEJC+nlY= github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.104.0 h1:4ke4j/y7AQnRAyYveB+KGcdjVYEKVrwTxc3BDHagdd0= github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.104.0/go.mod h1:I2zX9YBggIum9LAHXN1DqqbYOENrHXbXdkXouhwVCHw= github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.104.0 h1:/koTWTWCFF7tBYkDX5UzCaEc/ceTU8jij/Yzuj0So3M= github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.104.0/go.mod h1:KWVekIHTPScOrLKVYOiijxfEdGK5OBhD4EFNBh96ESg= github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.104.0 h1:4dU16tXhXWUfOYHoDtpEJHYze1ltgMFWvD1jWVeARRI= github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.104.0/go.mod h1:poM/ch3rxaWlkiGV3ohdEDALhfwx6jaKd1z7xk6iY0o= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.104.0 h1:j5EAcIE5iA03KdrfrmXmplfPc1Lybt6D8RAmuumoq60= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.104.0/go.mod h1:VS66oUydCMwiWl1BFmLs7iNy4lGsfVYsriXr/d1fpAk= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.104.0 h1:hB2LSx2h/Xvnfam8jXu8sGy3M6YVSD6bcI5saenp+kY= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.104.0/go.mod h1:jTZf5CwMDiILww23FgxvLdIkCPH952ItR/3dJUb/sSk= github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.104.0 
h1:dOPRk39L5bwQNbxJ7mSUyHan0un/r9DV9X7G+YrktGk= github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.104.0/go.mod h1:nyUlZ88VgBDoA9SfmUs0RcsVzqts9z0PpLxjFZPjD3w= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.104.0 h1:J4VwD+t7XpMuhdgd5KwhI5f17bOKHDD862szUW2ulVo= @@ -138,56 +606,217 @@ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.104.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.104.0/go.mod h1:tImy4FWNu1qpaXRVaNi2BU+TmZHtYgLO6LbB6mspZio= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.104.0 h1:Pl4rXXpRG/xJuNWUS3I/w1jViHcrssMf47bGX/Ug/KY= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.104.0/go.mod h1:tP4dyc5+g/qoXYb8lmNj+y+Nhphn4MkL23/np0Zhx2g= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.104.0 h1:dNDjrDhJmSv2JoK3n2hX/nyf/twTTnLuvAhQTMHGQ5M= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.104.0/go.mod h1:SyCZC+vcI2lnyb7iqH0/6dGgCihuqtCxGmLaZToxSHk= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.104.0 h1:iNr5/wS/0Rg4PnPO2Zf3Yj4Qc1RooVQ/7U7jKzocyPo= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.104.0/go.mod h1:4bLfc6BnVKRp3yY+ueEUEeyNWjW/InCGbFs9ZA7o/ko= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.104.0 h1:eCceBGaatwEKLiOzYjrYc4zNSMMfb+5Of9VNUnTYU80= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.104.0/go.mod h1:/mkH8dlZxnFRoccQoXkN/XOP6Q7G/1F8XTUO9+xZw7U= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.104.0 h1:avbrkX0c51UjJE13RBqk/Z5QyO/J7J2/O9FIBaJ+Few= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.104.0/go.mod h1:tkmsd1veEEsXtFdYSvoZU7S80INqCbNUGkEGQAivlV0= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.104.0 h1:W2OartqDicbzoLjAp2MCi+FIt2FBy5PyeYce0kIuerc= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.104.0/go.mod h1:I2so4Vn+ROaCECo0bdQXNxyUjY9tbq1JvcyuWPETLcM= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.104.0 h1:AnzZUTLBT5kADIOTE3NKqXK214sqnkilQqXkqgLjhJs= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.104.0/go.mod h1:5FQezrJhOulRFWMnrpo3Z9O/qWySgDNniPp0p2mFJs0= github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.104.0 h1:Vwkk+0+cppH+TrmdiVFWcshhdvh2g2IZEj16V8SLjLw= github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.104.0/go.mod h1:QmV2JbLC0lzzi0hMUKv5hJ824wdzvYInjVJsphQQ5Uo= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.104.0 h1:Nwkj5EFH90NxWPcl4qeef5AX+A1COWn1Xy1mkzuyIHE= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.104.0/go.mod h1:NJwlpVFJu2Dd1mEqCHzSXSNmd5JDhWGVDqo1Oi3RZKk= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod 
h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= +github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/openshift/api v3.9.0+incompatible h1:fJ/KsefYuZAjmrr3+5U9yZIZbTOpVkDDLDLFresAeYs= +github.com/openshift/api v3.9.0+incompatible/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY= +github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 h1:ZHRIMCFIJN1p9LsJt4HQ+akDrys4PrYnXzOWI5LK03I= +github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142/go.mod h1:fjS8r9mqDVsPb5td3NehsNOAWa4uiFkYEfVZioQ2gH0= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0= +github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= +github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0= +github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= +github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod 
h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= +github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= -github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= +github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs 
v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e h1:UmqAuY2OyDoog8+l5FybViJE5B2r+UxVGCUwFTsY5AA= +github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e/go.mod h1:+0ld+ozir7zWFcHA2vVpWAKxXakIioEjPPNOqH+J3ZA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/protocolbuffers/protoscope v0.0.0-20221109213918-8e7a6aafa2c9 h1:arwj11zP0yJIxIRiDn22E0H8PxfF7TsTrc2wIPFIsf4= +github.com/protocolbuffers/protoscope v0.0.0-20221109213918-8e7a6aafa2c9/go.mod h1:SKZx6stCn03JN3BOWTwvVIO2ajMkb/zQdTceXYhKw/4= +github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 h1:Qp27Idfgi6ACvFQat5+VJvlYToylpM/hcyLBI3WaKPA= +github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052/go.mod h1:uvX/8buq8uVeiZiFht+0lqSLBHF+uGV8BrTv8W/SIwk= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 h1:/8rfZAdFfafRXOgz+ZpMZZWZ5pYggCY9t7e/BvjaBHM= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= +github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= +github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= +github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= github.com/shirou/gopsutil/v4 v4.24.5 h1:gGsArG5K6vmsh5hcFOHaPm87UD003CaDMkAOweSQjhM= github.com/shirou/gopsutil/v4 v4.24.5/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stormcat24/protodep v0.1.8 h1:FOycjjkjZiastf21aRoCjtoVdhsoBE8mZ0RvY6AHqFE= +github.com/stormcat24/protodep v0.1.8/go.mod h1:6OoSZD5GGomKfmH1LvfJxNIRvYhewFXH5+eNv8h4wOM= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.2.0/go.mod 
h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo= +github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/tinylru v1.1.0 h1:XY6IUfzVTU9rpwdhKUF6nQdChgCdGjkMfLzbWyiau6I= +github.com/tidwall/tinylru v1.1.0/go.mod h1:3+bX+TJ2baOLMWTnlyNWHh4QMnFyARg2TLTQ6OFbzw8= +github.com/tidwall/wal v1.1.7 h1:emc1TRjIVsdKKSnpwGBAcsAGg0767SvUk8+ygx7Bb+4= +github.com/tidwall/wal v1.1.7/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E= github.com/tilinna/clock v1.1.0 h1:6IQQQCo6KoBxVudv6gwtY8o4eDfhHo8ojA5dP0MfhSs= github.com/tilinna/clock v1.1.0/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= +github.com/tinylib/msgp v1.1.9 h1:SHf3yoO2sGA0veCJeCBYLHuttAVFHGm2RHgNodW7wQU= +github.com/tinylib/msgp v1.1.9/go.mod h1:BCXGB54lDD8qUEPmiG0cQQUANC4IUQyB2ItS2UDlO/k= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg= +github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= +github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= +github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= +github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark 
v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/collector v0.104.0 h1:R3zjM4O3K3+ttzsjPV75P80xalxRbwYTURlK0ys7uyo= @@ -216,8 +845,16 @@ go.opentelemetry.io/collector/config/internal v0.104.0 h1:h3OkxTfXWWrHRyPEGMpJb4 go.opentelemetry.io/collector/config/internal v0.104.0/go.mod h1:KjH43jsAUFyZPeTOz7GrPORMQCK13wRMCyQpWk99gMo= go.opentelemetry.io/collector/confmap v0.104.0 h1:d3yuwX+CHpoyCh0iMv3rqb/vwAekjSm4ZDL6UK1nZSA= go.opentelemetry.io/collector/confmap v0.104.0/go.mod h1:F8Lue+tPPn2oldXcfqI75PPMJoyzgUsKVtM/uHZLA4w= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.104.0 h1:7BhJk71V8xhm8wUpuHG4CVRAPu8JajKj8VmGZ6zS7SA= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.104.0/go.mod h1:o2xTZJpc65SyYPOAGOjyvWwQEqYSWT4Q4/gMfOYpAzc= +go.opentelemetry.io/collector/confmap/provider/envprovider v0.104.0 h1:/3iSlUHH1Q3xeZc55oVekd4dibXzqgphXZI7EaYJ+ak= +go.opentelemetry.io/collector/confmap/provider/envprovider v0.104.0/go.mod h1:RZDXvP81JwvIGeq3rvDBrRKMUfn2BeKCmppHm4Qm0D8= go.opentelemetry.io/collector/confmap/provider/fileprovider v0.104.0 h1:B+nMVlIUQxuP52CZSegGuA2z9S+Cv2XwFb2a/TLFPhc= go.opentelemetry.io/collector/confmap/provider/fileprovider v0.104.0/go.mod h1:O0RcaP/I/kn7JHrwohUfj6AwvQYLxjbqg/HnjkvLLTw= +go.opentelemetry.io/collector/confmap/provider/httpprovider v0.104.0 h1:6UreSAu64Ft3VfKWE3sjcmf+mWMyWemSsrjS/fjRPpQ= +go.opentelemetry.io/collector/confmap/provider/httpprovider v0.104.0/go.mod h1:+vP6R5i9h+oYJNjp4bQHvtSHEu1t+CgSKIeZYZZRQXA= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.104.0 h1:y07I19lmp9VHZ58PJ3nwwd1wqumnIBeMxTNBSh/Vn6k= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.104.0/go.mod h1:WV1HOa0z3Ln5ZkwEW7Cm2pCHkfzYY9kBe0dLy8DqeYA= go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.104.0 h1:itBGhyEbX+iz8kz3nc4PYxQx4bL7y87xXNUcGnbKPuY= go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.104.0/go.mod h1:iPVsTBkRFHZ21UEfSGWk8c4maOzTp6BWWpTk+l6PjJI= go.opentelemetry.io/collector/connector v0.104.0 h1:Y82ytwZZ+EruWafEebO0dgWMH+TdkcSONEqZ5bm9JYA= @@ -226,6 +863,8 @@ go.opentelemetry.io/collector/consumer v0.104.0 h1:Z1ZjapFp5mUcbkGEL96ljpqLIUMhR go.opentelemetry.io/collector/consumer v0.104.0/go.mod h1:60zcIb0W9GW0z9uJCv6NmjpSbCfBOeRUyrtEwqK6Hzo= go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBLd+JxEtAWo7JNbg= go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ= +go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 h1:1Z63H/xxv6IzMP7GPmI6v/lQAqZwYZCVC0rWYcYOomw= 
+go.opentelemetry.io/collector/exporter/debugexporter v0.104.0/go.mod h1:NHVzTM0Z/bomgR7SAe3ysx4CZzh2UJ3TXWSCnaOB1Wo= go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 h1:EFOdhnc2yGhqou0Tud1HsM7fsgWo/H3tdQhYYytDprQ= go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0/go.mod h1:fAF7Q3Xh0OkxYWUycdrNNDXkyz3nhHIRKDkez0aQ6zg= go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 h1:JkNCOj7DdyJhcYIaRqtS/X+YtAPRjE4pcruyY6LoM7c= @@ -240,6 +879,8 @@ go.opentelemetry.io/collector/featuregate v1.11.0 h1:Z7puIymKoQRm3oNM/NH8reWc2zR go.opentelemetry.io/collector/featuregate v1.11.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= go.opentelemetry.io/collector/otelcol v0.104.0 h1:RnMx7RaSFmX4dq/l3wbXWwcUnFK7RU19AM/0FbMr0Ig= go.opentelemetry.io/collector/otelcol v0.104.0/go.mod h1:hWFRiHIKT3zbUx6SRevusPRa6mfm+70bPG5CK0glqSU= +go.opentelemetry.io/collector/otelcol/otelcoltest v0.104.0 h1:duPbOTahDcDP+XupC/KkHvebb8+NVKh7LzIpiEuKwLU= +go.opentelemetry.io/collector/otelcol/otelcoltest v0.104.0/go.mod h1:cNosA2o77fGp2N4Ofs5h6HBdHhlPQAbKBjBIc1l+8O4= go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE= go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE= go.opentelemetry.io/collector/pdata/pprofile v0.104.0 h1:MYOIHvPlKEJbWLiBKFQWGD0xd2u22xGVLt4jPbdxP4Y= @@ -270,8 +911,8 @@ go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E= go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E= go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs= go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 h1:bFgvUr3/O4PHj3VQcFEuYKvRZJX1SJDQ+11JXuSB3/w= @@ -290,93 +931,387 @@ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk 
v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= +go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0/go.mod 
h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod 
v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 
h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= 
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY= +google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= 
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d h1:Aqf0fiIdUQEj0Gn9mKFFXoQfTTEaNopWpfVyYADxiSg= -google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Od4k8V1LQSizPRUK4OzZ7TBE/20k+jPczUDAEyvn69Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -385,16 +1320,68 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/zorkian/go-datadog-api.v2 v2.30.0 h1:umQdVO0Ytx+kYadhuJNjFtDgIsIEBnKrOTvNuu8ClKI= +gopkg.in/zorkian/go-datadog-api.v2 v2.30.0/go.mod h1:kx0CSMRpzEZfx/nFH62GLU4stZjparh/BRpM89t4XCQ= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.2.0 h1:I0DwBVMGAx26dttAj1BtJLAkVGncrkkUXfJLC4Flt/I= +gotest.tools/v3 v3.2.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= +k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= +k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= +k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= +k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod 
h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/comp/otelcol/ddflareextension/impl/server.go b/comp/otelcol/ddflareextension/impl/server.go index 2a4c74f66fda8..4b3c0fcf0c104 100644 --- a/comp/otelcol/ddflareextension/impl/server.go +++ b/comp/otelcol/ddflareextension/impl/server.go @@ -7,6 +7,7 @@ package ddflareextensionimpl import ( + "context" "crypto/rand" "crypto/rsa" "crypto/tls" @@ -18,20 +19,38 @@ import ( "net" "net/http" "time" + + "github.com/DataDog/datadog-agent/pkg/api/util" + "github.com/gorilla/mux" ) -func buildHTTPServer(endpoint string, handler http.Handler) (*http.Server, net.Listener, error) { +type server struct { + srv *http.Server + listener net.Listener +} + +// validateToken - validates token for legacy API +func validateToken(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if err := util.Validate(w, r); err != nil { + return + } + next.ServeHTTP(w, r) + }) +} + +func newServer(endpoint string, handler http.Handler) (*server, error) { // Generate a self-signed certificate key, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { - return nil, nil, err + return nil, err } serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { - return nil, nil, err + return nil, err } template := x509.Certificate{ @@ -57,12 +76,12 @@ func buildHTTPServer(endpoint string, handler http.Handler) (*http.Server, net.L certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key) if err != nil { - return nil, nil, err + return nil, err } // parse the resulting certificate so we can use it again _, err = x509.ParseCertificate(certDER) if err != nil { - return nil, nil, err + return nil, err } // PEM encode the certificate (this is a standard TLS encoding) b := pem.Block{Type: "CERTIFICATE", Bytes: certDER} @@ -74,13 +93,13 @@ func buildHTTPServer(endpoint string, handler http.Handler) (*http.Server, net.L pair, err := tls.X509KeyPair(certPEM, keyPEM) if err != nil { - return nil, nil, fmt.Errorf("unable to generate TLS key pair: %v", err) + return nil, fmt.Errorf("unable to generate TLS key pair: %v", err) } tlsCertPool := x509.NewCertPool() ok := tlsCertPool.AppendCertsFromPEM(certPEM) if !ok { - return nil, nil, fmt.Errorf("unable to add new certificate to pool") + return nil, fmt.Errorf("unable to add new certificate to pool") } // Create TLS configuration @@ -90,22 +109,35 @@ func buildHTTPServer(endpoint string, handler http.Handler) (*http.Server, net.L MinVersion: tls.VersionTLS12, } - server := &http.Server{ + r := mux.NewRouter() + r.Handle("/", handler) + + r.Use(validateToken) + + s := &http.Server{ Addr: endpoint, TLSConfig: tlsConfig, - Handler: handler, + Handler: r, } listener, err := net.Listen("tcp", endpoint) if err != nil { - return nil, nil, err + return nil, err } - tlsListener := tls.NewListener(listener, server.TLSConfig) - go func() { - _ = server.Serve(tlsListener) - }() + tlsListener := tls.NewListener(listener, s.TLSConfig) + + return &server{ + srv: s, + listener: tlsListener, + }, nil - return server, tlsListener, nil +} + +func (s *server) start() error { + return s.srv.Serve(s.listener) +} +func (s *server) shutdown(ctx context.Context) error { + return s.srv.Shutdown(ctx) } diff --git 
a/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-enhanced-result.yaml b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-enhanced-result.yaml new file mode 100644 index 0000000000000..505453b479f2b --- /dev/null +++ b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-enhanced-result.yaml @@ -0,0 +1,236 @@ +connectors: {} +exporters: + datadog: + api: + fail_on_invalid_key: false + key: "[REDACTED]" + site: datadoghq.com + auth: null + compression: "" + cookies: null + disable_keep_alives: false + endpoint: "" + headers: {} + host_metadata: + enabled: true + hostname_source: config_or_system + tags: [] + hostname: "" + http2_ping_timeout: 0s + http2_read_idle_timeout: 0s + idle_conn_timeout: null + logs: + batch_wait: 5 + compression_level: 6 + dialer: + timeout: 0s + dump_payloads: false + endpoint: https://agent-http-intake.logs.datadoghq.com + use_compression: true + max_conns_per_host: null + max_idle_conns: null + max_idle_conns_per_host: null + metrics: + apm_stats_receiver_addr: "" + delta_ttl: 3600 + dialer: + timeout: 0s + enabled: false + endpoint: https://api.datadoghq.com + histograms: + mode: distributions + send_aggregation_metrics: false + send_count_sum_metrics: false + instrumentation_library_metadata_as_tags: false + instrumentation_scope_metadata_as_tags: false + resource_attributes_as_tags: false + summaries: + mode: gauges + sums: + cumulative_monotonic_mode: to_delta + initial_cumulative_monotonic_value: auto + tag_cardinality: "" + tags: "" + only_metadata: false + proxy_url: "" + read_buffer_size: 0 + retry_on_failure: + enabled: true + initial_interval: 5s + max_elapsed_time: 5m0s + max_interval: 30s + multiplier: 1.5 + randomization_factor: 0.5 + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 1000 + storage: null + timeout: 15s + tls: + ca_file: "" + ca_pem: "[REDACTED]" + cert_file: "" + cert_pem: "[REDACTED]" + cipher_suites: [] + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + key_pem: "[REDACTED]" + max_version: "" + min_version: "" + reload_interval: 0s + server_name_override: "" + traces: + compute_top_level_by_span_kind: true + dialer: + timeout: 0s + endpoint: https://trace.agent.datadoghq.com + ignore_resources: [] + span_name_as_resource_name: true + span_name_remappings: {} + trace_buffer: 0 + write_buffer_size: 0 +extensions: + ddflare/dd-autoconfigured: + auth: null + compression_algorithms: [] + cors: null + endpoint: localhost:7777 + include_metadata: false + max_request_body_size: 0 + response_headers: {} + tls: null + health_check/dd-autoconfigured: + auth: null + check_collector_pipeline: + enabled: false + exporter_failure_threshold: 5 + interval: 5m + compression_algorithms: [] + cors: null + endpoint: localhost:13133 + include_metadata: false + max_request_body_size: 0 + path: / + response_body: null + response_headers: {} + tls: null + pprof/dd-autoconfigured: + block_profile_fraction: 0 + dialer: + timeout: 0s + endpoint: localhost:1777 + mutex_profile_fraction: 0 + save_to_file: "" + zpages/dd-autoconfigured: + auth: null + compression_algorithms: [] + cors: null + endpoint: localhost:55679 + include_metadata: false + max_request_body_size: 0 + response_headers: {} + tls: null +processors: + infraattributes/dd-autoconfigured: + cardinality: 0 + logs: + log: [] + metrics: + metric: [] + traces: + span: [] +receivers: + otlp: + protocols: + grpc: null + http: null + prometheus: + config: + global: + evaluation_interval: 1m + 
scrape_interval: 1m + scrape_protocols: + - OpenMetricsText1.0.0 + - OpenMetricsText0.0.1 + - PrometheusText0.0.4 + scrape_timeout: 10s + scrape_configs: + - enable_compression: true + enable_http2: true + follow_redirects: true + honor_timestamps: true + job_name: datadog-agent + metrics_path: /metrics + scheme: http + scrape_interval: 5s + scrape_protocols: + - OpenMetricsText1.0.0 + - OpenMetricsText0.0.1 + - PrometheusText0.0.4 + scrape_timeout: 5s + static_configs: + - targets: + - 0.0.0.0:8888 + track_timestamps_staleness: false + report_extra_scrape_metrics: false + start_time_metric_regex: "" + target_allocator: null + trim_metric_suffixes: false + use_start_time_metric: false +service: + extensions: + - pprof/dd-autoconfigured + - zpages/dd-autoconfigured + - health_check/dd-autoconfigured + - ddflare/dd-autoconfigured + pipelines: + logs: + exporters: + - datadog + processors: + - infraattributes/dd-autoconfigured + receivers: + - otlp + metrics: + exporters: + - datadog + processors: + - infraattributes/dd-autoconfigured + receivers: + - otlp + - prometheus + traces: + exporters: + - datadog + processors: + - infraattributes/dd-autoconfigured + receivers: + - otlp + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + error_output_paths: + - stderr + initial_fields: {} + level: info + output_paths: + - stderr + sampling: + enabled: true + initial: 10 + thereafter: 100 + tick: 10s + metrics: + address: :8888 + level: Normal + readers: [] + resource: {} + traces: + processors: [] + propagators: [] diff --git a/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-provided-result.yaml b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-provided-result.yaml new file mode 100644 index 0000000000000..7e10c57812684 --- /dev/null +++ b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-provided-result.yaml @@ -0,0 +1,181 @@ +connectors: {} +exporters: + datadog: + api: + fail_on_invalid_key: false + key: '[REDACTED]' + site: datadoghq.com + auth: null + compression: "" + cookies: null + disable_keep_alives: false + endpoint: "" + headers: {} + host_metadata: + enabled: true + hostname_source: config_or_system + tags: [] + hostname: "" + http2_ping_timeout: 0s + http2_read_idle_timeout: 0s + idle_conn_timeout: null + logs: + batch_wait: 5 + compression_level: 6 + dialer: + timeout: 0s + dump_payloads: false + endpoint: https://agent-http-intake.logs.datadoghq.com + use_compression: true + max_conns_per_host: null + max_idle_conns: null + max_idle_conns_per_host: null + metrics: + apm_stats_receiver_addr: "" + dialer: + timeout: 0s + delta_ttl: 3600 + enabled: false + endpoint: https://api.datadoghq.com + histograms: + mode: distributions + send_aggregation_metrics: false + send_count_sum_metrics: false + instrumentation_library_metadata_as_tags: false + instrumentation_scope_metadata_as_tags: false + resource_attributes_as_tags: false + summaries: + mode: gauges + sums: + cumulative_monotonic_mode: to_delta + initial_cumulative_monotonic_value: auto + tag_cardinality: "" + tags: "" + only_metadata: false + proxy_url: "" + read_buffer_size: 0 + retry_on_failure: + enabled: true + initial_interval: 5s + max_elapsed_time: 5m0s + max_interval: 30s + multiplier: 1.5 + randomization_factor: 0.5 + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 1000 + storage: null + timeout: 15s + tls: + ca_file: "" + ca_pem: '[REDACTED]' + cert_file: "" + cert_pem: '[REDACTED]' + cipher_suites: [] + 
include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + key_pem: '[REDACTED]' + max_version: "" + min_version: "" + reload_interval: 0s + server_name_override: "" + traces: + compute_top_level_by_span_kind: true + dialer: + timeout: 0s + endpoint: https://trace.agent.datadoghq.com + ignore_resources: [] + span_name_as_resource_name: true + span_name_remappings: {} + trace_buffer: 0 + write_buffer_size: 0 +extensions: {} +processors: {} +receivers: + otlp: + protocols: + grpc: null + http: null + prometheus: + config: + global: + evaluation_interval: 1m + scrape_interval: 1m + scrape_protocols: + - OpenMetricsText1.0.0 + - OpenMetricsText0.0.1 + - PrometheusText0.0.4 + scrape_timeout: 10s + scrape_configs: + - enable_compression: true + enable_http2: true + follow_redirects: true + honor_timestamps: true + job_name: datadog-agent + metrics_path: /metrics + scheme: http + scrape_interval: 5s + scrape_protocols: + - OpenMetricsText1.0.0 + - OpenMetricsText0.0.1 + - PrometheusText0.0.4 + scrape_timeout: 5s + static_configs: + - targets: + - 0.0.0.0:8888 + track_timestamps_staleness: false + report_extra_scrape_metrics: false + start_time_metric_regex: "" + target_allocator: null + trim_metric_suffixes: false + use_start_time_metric: false +service: + extensions: [] + pipelines: + logs: + exporters: + - datadog + processors: [] + receivers: + - otlp + metrics: + exporters: + - datadog + processors: [] + receivers: + - otlp + - prometheus + traces: + exporters: + - datadog + processors: [] + receivers: + - otlp + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + error_output_paths: + - stderr + initial_fields: {} + level: info + output_paths: + - stderr + sampling: + enabled: true + initial: 10 + thereafter: 100 + tick: 10s + metrics: + address: :8888 + level: Normal + readers: [] + resource: {} + traces: + processors: [] + propagators: [] diff --git a/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config.yaml b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config.yaml new file mode 100644 index 0000000000000..6939a55d7d783 --- /dev/null +++ b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config.yaml @@ -0,0 +1,26 @@ +receivers: + otlp: + prometheus: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 5s + static_configs: + - targets: ['0.0.0.0:8888'] + +exporters: + datadog: + api: + key: '12345' + +service: + pipelines: + traces: + receivers: [otlp] + exporters: [datadog] + metrics: + receivers: [otlp, prometheus] + exporters: [datadog] + logs: + receivers: [otlp] + exporters: [datadog] diff --git a/comp/otelcol/ddflareextension/impl/unmarshaler.go b/comp/otelcol/ddflareextension/impl/unmarshaler.go new file mode 100644 index 0000000000000..9b48c23ea34d0 --- /dev/null +++ b/comp/otelcol/ddflareextension/impl/unmarshaler.go @@ -0,0 +1,115 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package ddflareextensionimpl defines the OpenTelemetry Extension implementation. 
+package ddflareextensionimpl + +import ( + "fmt" + + "golang.org/x/exp/maps" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/connector" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/otelcol" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/service" + "go.opentelemetry.io/collector/service/telemetry" +) + +type configSettings struct { + Receivers *configs[receiver.Factory] `mapstructure:"receivers"` + Processors *configs[processor.Factory] `mapstructure:"processors"` + Exporters *configs[exporter.Factory] `mapstructure:"exporters"` + Connectors *configs[connector.Factory] `mapstructure:"connectors"` + Extensions *configs[extension.Factory] `mapstructure:"extensions"` + Service service.Config `mapstructure:"service"` +} + +// unmarshal the configSettings from a confmap.Conf. +// After the config is unmarshalled, `Validate()` must be called to validate. +func unmarshal(v *confmap.Conf, factories otelcol.Factories) (*configSettings, error) { + + telFactory := telemetry.NewFactory() + defaultTelConfig := *telFactory.CreateDefaultConfig().(*telemetry.Config) + + // Unmarshal top level sections and validate. + cfg := &configSettings{ + Receivers: newConfigs(factories.Receivers), + Processors: newConfigs(factories.Processors), + Exporters: newConfigs(factories.Exporters), + Connectors: newConfigs(factories.Connectors), + Extensions: newConfigs(factories.Extensions), + // TODO: Add a component.ServiceFactory to allow this to be defined by the Service. + Service: service.Config{ + Telemetry: defaultTelConfig, + }, + } + + return cfg, v.Unmarshal(&cfg) +} + +type configs[F component.Factory] struct { + cfgs map[component.ID]component.Config + + factories map[component.Type]F +} + +func newConfigs[F component.Factory](factories map[component.Type]F) *configs[F] { + return &configs[F]{factories: factories} +} + +func (c *configs[F]) Configs() map[component.ID]component.Config { + return c.cfgs +} + +func (c *configs[F]) Unmarshal(conf *confmap.Conf) error { + rawCfgs := make(map[component.ID]map[string]any) + if err := conf.Unmarshal(&rawCfgs); err != nil { + return err + } + + // Prepare resulting map. + c.cfgs = make(map[component.ID]component.Config) + // Iterate over raw configs and create a config for each. + for id := range rawCfgs { + // Find factory based on component kind and type that we read from config source. + factory, ok := c.factories[id.Type()] + if !ok { + return errorUnknownType(id, maps.Keys(c.factories)) + } + + // Get the configuration from the confmap.Conf to preserve internal representation. + sub, err := conf.Sub(id.String()) + if err != nil { + return errorUnmarshalError(id, err) + } + + // Create the default config for this component. + cfg := factory.CreateDefaultConfig() + + // Now that the default config struct is created we can Unmarshal into it, + // and it will apply user-defined config on top of the default. 
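// A minimal, self-contained sketch (not taken from this change) of the
// default-then-overlay behavior the comment above relies on: confmap's
// Unmarshal only overrides keys present in the map, so fields pre-populated
// by a factory's CreateDefaultConfig keep their defaults. The exampleConfig
// type, its fields, and the values below are hypothetical, for illustration only.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/confmap"
)

type exampleConfig struct {
	Endpoint string `mapstructure:"endpoint"`
	Timeout  int    `mapstructure:"timeout"`
}

func main() {
	// Start from a "factory default", as a component's CreateDefaultConfig would.
	cfg := exampleConfig{Endpoint: "localhost:4317", Timeout: 30}

	// The user-provided section only sets the endpoint.
	conf := confmap.NewFromStringMap(map[string]any{"endpoint": "otel:4317"})

	// Unmarshal overlays the user values on top of the defaults;
	// Timeout keeps its default of 30.
	if err := conf.Unmarshal(&cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Endpoint:otel:4317 Timeout:30}
}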
+ if err := sub.Unmarshal(&cfg); err != nil { + return errorUnmarshalError(id, err) + } + + c.cfgs[id] = cfg + } + + return nil +} + +func errorUnknownType(id component.ID, factories []component.Type) error { + return fmt.Errorf("unknown type: %q for id: %q (valid values: %v)", id.Type(), id, factories) +} + +func errorUnmarshalError(id component.ID, err error) error { + return fmt.Errorf("error reading configuration for %q: %w", id, err) +} diff --git a/comp/otelcol/logsagentpipeline/go.mod b/comp/otelcol/logsagentpipeline/go.mod index b5adb734302e7..9ec153f229d1d 100644 --- a/comp/otelcol/logsagentpipeline/go.mod +++ b/comp/otelcol/logsagentpipeline/go.mod @@ -19,7 +19,10 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/logs/auditor => ../../../pkg/logs/auditor github.com/DataDog/datadog-agent/pkg/logs/client => ../../../pkg/logs/client @@ -61,14 +64,17 @@ require github.com/DataDog/datadog-agent/pkg/logs/pipeline v0.56.0-rc.3 require ( github.com/DataDog/agent-payload/v5 v5.0.106 // indirect github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 // indirect @@ -84,20 +90,20 @@ require ( github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 // indirect - 
github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect github.com/DataDog/viper v1.13.5 // indirect @@ -133,7 +139,7 @@ require ( github.com/prometheus/procfs v0.11.1 // indirect github.com/shirou/gopsutil/v3 v3.24.1 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect @@ -153,13 +159,13 @@ require ( go.uber.org/fx v1.22.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.28.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.29.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/tools v0.25.0 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/comp/otelcol/logsagentpipeline/go.sum b/comp/otelcol/logsagentpipeline/go.sum index a9a19f40f3cb2..d47dfc5263745 100644 --- a/comp/otelcol/logsagentpipeline/go.sum +++ b/comp/otelcol/logsagentpipeline/go.sum @@ -1,44 +1,6 @@ cloud.google.com/go v0.26.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb 
v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/agent-payload/v5 v5.0.106 h1:A3dGX+JYoL7OJe2crpxznW7hWxLxhOk/17WbYskRWVk= github.com/DataDog/agent-payload/v5 v5.0.106/go.mod h1:COngtbYYCncpIPiE5D93QlXDH/3VAKk10jDNwGHcMRE= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= @@ -65,15 +27,9 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -88,11 +44,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= @@ -100,9 +52,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod 
h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -122,68 +71,26 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/google-cloud-go-testing 
v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -195,21 +102,15 @@ github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/U github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= @@ -219,7 +120,6 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -252,8 +152,6 @@ github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= 
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= @@ -303,8 +201,8 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -323,8 +221,6 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= @@ -342,19 +238,11 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= 
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= go.opentelemetry.io/otel/exporters/prometheus v0.42.0 h1:jwV9iQdvp38fxXi8ZC+lNpxjK16MRcZlpDYvbuO1FiA= @@ -389,106 +277,42 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod 
h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod 
h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 
v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -496,61 +320,23 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= 
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -559,151 +345,30 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod 
h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= @@ -729,12 +394,5 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod index 471ae801b59bb..0a941558d1d9e 100644 --- a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod +++ b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod @@ -20,7 +20,10 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../../pkg/config/structure + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/logs/auditor => ../../../../pkg/logs/auditor github.com/DataDog/datadog-agent/pkg/logs/client => ../../../../pkg/logs/client @@ -64,8 +67,8 @@ require ( github.com/DataDog/datadog-agent/comp/core/log/mock v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.56.0-rc.3 @@ -76,7 +79,7 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/testutil v0.56.0-rc.3 github.com/stretchr/testify v1.9.0 @@ -88,12 +91,15 @@ require ( github.com/DataDog/agent-payload/v5 v5.0.106 // indirect github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.0-rc.3 // indirect - 
github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/processor v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/sds v0.56.0-rc.3 // indirect @@ -101,18 +107,18 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/log/setup v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect github.com/DataDog/viper v1.13.5 // indirect @@ -148,7 +154,7 @@ require ( github.com/prometheus/procfs v0.11.1 // indirect github.com/shirou/gopsutil/v3 v3.24.4 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/afero 
v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect @@ -165,13 +171,13 @@ require ( go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.28.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.29.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/tools v0.25.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.sum b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.sum index ecc6e5a04d3a6..b9b676e0052a4 100644 --- a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.sum +++ b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.sum @@ -1,44 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= 
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/agent-payload/v5 v5.0.106 h1:A3dGX+JYoL7OJe2crpxznW7hWxLxhOk/17WbYskRWVk= github.com/DataDog/agent-payload/v5 v5.0.106/go.mod h1:COngtbYYCncpIPiE5D93QlXDH/3VAKk10jDNwGHcMRE= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= @@ -65,15 +27,9 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -88,11 +44,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= @@ -100,9 +52,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -122,68 +71,26 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf 
v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof 
v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -195,21 +102,15 @@ github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/U github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= 
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= @@ -219,7 +120,6 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -252,8 +152,6 @@ github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= @@ -303,8 +201,8 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -324,8 +222,6 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= @@ -343,19 +239,11 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= go.opentelemetry.io/otel/exporters/prometheus v0.42.0 h1:jwV9iQdvp38fxXi8ZC+lNpxjK16MRcZlpDYvbuO1FiA= @@ -390,106 +278,42 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= 
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= 
+golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net 
v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -497,61 +321,23 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -560,151 +346,30 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools 
v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools 
v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod 
h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= @@ -730,12 +395,5 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/comp/otelcol/otlp/collector.go b/comp/otelcol/otlp/collector.go index e3978764031e5..2ce66bf173972 100644 --- a/comp/otelcol/otlp/collector.go +++ b/comp/otelcol/otlp/collector.go @@ -12,9 +12,10 @@ import ( "fmt" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtelemetry" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/loggingexporter" + "go.opentelemetry.io/collector/exporter/debugexporter" "go.opentelemetry.io/collector/exporter/otlpexporter" "go.opentelemetry.io/collector/extension" "go.opentelemetry.io/collector/otelcol" @@ -32,6 +33,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/types" + "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor" @@ -85,6 +87,11 @@ func (t *tagEnricher) Enrich(_ context.Context, extraTags []string, 
dimensions * return enrichedTags } +func generateID(group, resource, namespace, name string) string { + + return string(util.GenerateKubeMetadataEntityID(group, resource, namespace, name)) +} + func getComponents(s serializer.MetricSerializer, logsAgentChannel chan *message.Message, tagger tagger.Component) ( otelcol.Factories, error, @@ -106,7 +113,7 @@ func getComponents(s serializer.MetricSerializer, logsAgentChannel chan *message exporterFactories := []exporter.Factory{ otlpexporter.NewFactory(), serializerexporter.NewFactory(s, &tagEnricher{cardinality: types.LowCardinality}, hostname.Get, nil, nil), - loggingexporter.NewFactory(), + debugexporter.NewFactory(), } if logsAgentChannel != nil { @@ -120,7 +127,7 @@ func getComponents(s serializer.MetricSerializer, logsAgentChannel chan *message processorFactories := []processor.Factory{batchprocessor.NewFactory()} if tagger != nil { - processorFactories = append(processorFactories, infraattributesprocessor.NewFactory(tagger)) + processorFactories = append(processorFactories, infraattributesprocessor.NewFactory(tagger, generateID)) } processors, err := processor.MakeFactoryMap(processorFactories...) if err != nil { @@ -163,30 +170,20 @@ type PipelineConfig struct { Metrics map[string]interface{} } -// valid values for debug log level. -var debugLogLevelMap = map[string]struct{}{ - "disabled": {}, - "debug": {}, - "info": {}, - "warn": {}, - "error": {}, -} - // shouldSetLoggingSection returns whether debug logging is enabled. -// If an invalid loglevel value is set, it assumes debug logging is disabled. -// If the special 'disabled' value is set, it returns false. -// Otherwise it returns true and lets the Collector handle the rest. +// Debug logging is enabled when verbosity is set to a valid value except for "none", or left unset. func (p *PipelineConfig) shouldSetLoggingSection() bool { - // Legacy behavior: keep it so that we support `loglevel: disabled`. - if v, ok := p.Debug["loglevel"]; ok { - if s, ok := v.(string); ok { - _, ok := debugLogLevelMap[s] - return ok && s != "disabled" - } + v, ok := p.Debug["verbosity"] + if !ok { + return true } - - // If the legacy behavior does not apply, we always want to set the logging section. - return true + s, ok := v.(string) + if !ok { + return false + } + var level configtelemetry.Level + err := level.UnmarshalText([]byte(s)) + return err == nil && s != "none" } // Pipeline is an OTLP pipeline. 
diff --git a/comp/otelcol/otlp/collector_test.go b/comp/otelcol/otlp/collector_test.go index 3d9b51062daf5..1ab4038d61da2 100644 --- a/comp/otelcol/otlp/collector_test.go +++ b/comp/otelcol/otlp/collector_test.go @@ -9,7 +9,6 @@ package otlp import ( "context" - "runtime" "testing" "time" @@ -19,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/message" serializermock "github.com/DataDog/datadog-agent/pkg/serializer/mocks" ) @@ -66,30 +65,21 @@ func AssertFailedRun(t *testing.T, pcfg PipelineConfig, expected string) { require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - assert.ErrorContains(t, p.Run(ctx), expected) + pipelineError := p.Run(ctx) + assert.ErrorContains(t, pipelineError, expected) } func TestStartPipeline(t *testing.T) { - config.Datadog().SetWithoutSource("hostname", "otlp-testhostname") - defer config.Datadog().SetWithoutSource("hostname", "") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "otlp-testhostname") + defer pkgconfigsetup.Datadog().SetWithoutSource("hostname", "") pcfg := getTestPipelineConfig() AssertSucessfulRun(t, pcfg) } func TestStartPipelineFromConfig(t *testing.T) { - config.Datadog().SetWithoutSource("hostname", "otlp-testhostname") - defer config.Datadog().SetWithoutSource("hostname", "") - - // TODO (AP-1723): Disable changing the gRPC logger before re-enabling. - if runtime.GOOS == "windows" { - t.Skip("Skip on Windows, see AP-1723 for details") - } - - // TODO (AP-1723): Update Collector to version 0.55 before re-enabling. 
- if runtime.GOOS == "darwin" { - t.Skip("Skip on macOS, see AP-1723 for details") - } + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "otlp-testhostname") + defer pkgconfigsetup.Datadog().SetWithoutSource("hostname", "") tests := []struct { path string @@ -103,7 +93,7 @@ func TestStartPipelineFromConfig(t *testing.T) { {path: "receiver/advanced.yaml"}, { path: "receiver/typo.yaml", - err: "error decoding 'receivers': error reading configuration for \"otlp\": 1 error(s) decoding:\n\n* 'protocols' has invalid keys: htttp", + err: "'protocols' has invalid keys: htttp", }, } diff --git a/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod b/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod index 5f48e6da16c46..ae077e7365176 100644 --- a/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod +++ b/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod @@ -33,7 +33,10 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../../../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../../../../pkg/config/structure + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../../../../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/logs/auditor => ../../../../../../pkg/logs/auditor github.com/DataDog/datadog-agent/pkg/logs/client => ../../../../../../pkg/logs/client @@ -97,10 +100,10 @@ require ( github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/serializer v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 github.com/DataDog/datadog-go/v5 v5.5.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.104.0 go.opentelemetry.io/collector/config/configauth v0.104.0 @@ -127,7 +130,7 @@ require ( github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/log/def v0.0.0-00010101000000-000000000000 // indirect - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect @@ -137,11 +140,14 @@ require ( github.com/DataDog/datadog-agent/comp/serializer/compression v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/trace/compression/def v0.56.0-rc.3 // indirect 
github.com/DataDog/datadog-agent/pkg/aggregator/ckey v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 // indirect @@ -167,29 +173,29 @@ require ( github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/cgroups v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go 
v0.0.0-20240816154533-f7f9beb53a42 // indirect - github.com/DataDog/go-sqllexer v0.0.14 // indirect + github.com/DataDog/go-sqllexer v0.0.15 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.16.1 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/DataDog/zstd v1.5.5 // indirect @@ -290,12 +296,12 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.22.2 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/net v0.28.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/net v0.29.0 // indirect golang.org/x/oauth2 v0.20.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.6.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect google.golang.org/grpc v1.64.0 // indirect diff --git a/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum b/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum index cdf2e4f5f136d..1b9f4ba2789ad 100644 --- a/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum +++ b/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum @@ -8,24 +8,24 @@ github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42/go.mod h1:TX7CTOQ3LbQjfAi4SwqUoR5gY1zfUk7VRBDTuArjaDc= -github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q= -github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.15 h1:rUUu52dP8EQhJLnUw0MIAxZp0BQx2fOTuMztr3vtHUU= +github.com/DataDog/go-sqllexer v0.0.15/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 h1:weAPKDi/dTlBbWU4oDZ55ubomqUob6OWPoUcdBjWM2M= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0/go.mod h1:VrcmO2+HTWXaGYin1pAAXWNEtaza/DCJDH/+t5IY5rs= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.18.0 h1:KNiq6ofE5BBMQjl7w9fftg8z44C9z51w7qOWIKs5SCg= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.18.0/go.mod 
h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 h1:FaUFQE8IuaNdpOQGIhoy2h58v8AVND+yZG3gVqKAwLQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.16.1 h1:px2+7svK86oeCGd+sT1x/9f0pqIJdApGFnWI0AOPXwA= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.16.1/go.mod h1:+LijQ2LdlocAQ4WB+7KsoIGe90bfogkRslubd9swVow= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0 h1:Fija8Qo0z/HngskYyBpMqmJKM2ejNr1NfXUyWszFDAw= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0/go.mod h1:lNu6vfFNCV/tyWxs8x8nCN1TqK+bPeI2dbnlwFTs8VA= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0 h1:x6re32f8gQ8fdCllywQyAbxQuXNrgxeimpLBfvwA97g= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0/go.mod h1:R84ZVbxKSgMxzvJro/MftVrlkGm2C2gndUhV35wyR8A= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 h1:jdsuH8u4rxfvy3ZHoSLk5NAZrQMNZqyJwhM15FpEswE= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0/go.mod h1:KI5I5JhJNOQWeE4vs+qk+BY/9PVSDwNmSjrCUrmuZKw= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0 h1:e4XT2+v4vgZBCbp5JUbe0Z+PRegh+nsLMp4X+esht9E= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 h1:JLpKc1QpkaUXEFgN68/Q9XgF0XgbVl/IXd8S1KUcEV4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0/go.mod h1:VJtgUHCz38obs58oEjNjEre6IaHmR+s7o4DvX74knq4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 h1:b60rxWT/EwcSA4l/zXfqTZp3udXJ1fKtz7+Qwat8OjQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0/go.mod h1:6jM34grB+zhMrzWgM0V8B6vyIJ/75oAfjcx/mJWv6cE= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 h1:0OFAPO964qsj6BzKs/hbAkpO/IIHp7vN1klKrohzULA= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0/go.mod h1:IDaKpBfDtw8eWBLtXR14HB5dsXDxS4VRUR0OL5rlRT8= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= @@ -470,11 +470,11 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -497,8 +497,8 @@ golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -530,15 +530,15 @@ golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod index e689f59dd1341..e0d3afcbfb7eb 100644 --- a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod +++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod @@ -16,7 +16,10 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../../../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../../../../pkg/config/structure + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../../../../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/logs/message => ../../../../../../pkg/logs/message github.com/DataDog/datadog-agent/pkg/logs/sources => ../../../../../../pkg/logs/sources @@ -44,9 +47,9 @@ require ( github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.17.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.14.0 + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 github.com/stormcat24/protodep v0.1.8 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.104.0 @@ -55,27 +58,30 @@ require ( ) require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/proto v0.55.0 // indirect - 
github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/datadog-api-client-go/v2 v2.13.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/DataDog/zstd v1.5.2 // indirect @@ -123,7 +129,7 @@ require ( github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/shirou/gopsutil/v3 v3.24.4 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -149,10 +155,10 @@ require ( go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect golang.org/x/net v0.28.0 // indirect golang.org/x/oauth2 v0.20.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/term v0.23.0 // indirect golang.org/x/text v0.17.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 // indirect diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum index 35e04032d9e4c..8b51e24f9825e 100644 --- a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum +++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum @@ -2,12 +2,12 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-api-client-go/v2 v2.13.0 h1:2c1dXSyUfum2YIVoYlqnBhV5JOG1cLSW+4jB3RrKjLc= github.com/DataDog/datadog-api-client-go/v2 
v2.13.0/go.mod h1:kntOqXEh1SmjwSDzW/eJkr9kS7EqttvEkelglWtJRbg= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 h1:weAPKDi/dTlBbWU4oDZ55ubomqUob6OWPoUcdBjWM2M= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0/go.mod h1:VrcmO2+HTWXaGYin1pAAXWNEtaza/DCJDH/+t5IY5rs= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.17.0 h1:1FGr7e8wAebpvpoabdQcRt5WtPCJ2W2kDPzLfOb07/c= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.17.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.14.0 h1:nma5ZICTbHZ0YoMu18ziWGSLK1ICzMm6rJTv+IatJ0U= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.14.0/go.mod h1:xUiGj13q5uHPboc0xZ754fyusiF5C2RxNzOFdTbdZFA= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 h1:jdsuH8u4rxfvy3ZHoSLk5NAZrQMNZqyJwhM15FpEswE= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0/go.mod h1:KI5I5JhJNOQWeE4vs+qk+BY/9PVSDwNmSjrCUrmuZKw= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 h1:JLpKc1QpkaUXEFgN68/Q9XgF0XgbVl/IXd8S1KUcEV4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0/go.mod h1:VJtgUHCz38obs58oEjNjEre6IaHmR+s7o4DvX74knq4= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= @@ -232,8 +232,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -350,8 +351,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp 
v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -413,8 +414,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go index 7bf6c54154f23..04f8eaeae152b 100644 --- a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go +++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go @@ -94,6 +94,9 @@ func (e *Exporter) ConsumeLogs(ctx context.Context, ld plog.Logs) (err error) { // ingestionTs is an internal field used for latency tracking on the status page, not the actual log timestamp. 
ingestionTs := time.Now().UnixNano() message := message.NewMessage(content, origin, status, ingestionTs) + if ddLog.Hostname != nil { + message.Hostname = *ddLog.Hostname + } e.logsAgentChannel <- message } diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go index 3944c650f7828..839b5a294c1d2 100644 --- a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go +++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go @@ -73,6 +73,7 @@ func TestLogsExporter(t *testing.T) { ldd := lrr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) ldd.Attributes().PutStr("message", "hello") ldd.Attributes().PutStr("datadog.log.source", "custom_source") + ldd.Attributes().PutStr("host.name", "test-host") return lrr }(), otelSource: otelSource, @@ -95,6 +96,8 @@ func TestLogsExporter(t *testing.T) { "otel.trace_id": traceIDToHexOrEmptyString(ld.TraceID()), "otel.timestamp": fmt.Sprintf("%d", testutil.TestLogTime.UnixNano()), "resource-attr": "resource-attr-val-1", + "host.name": "test-host", + "hostname": "test-host", }, }, expectedTags: [][]string{{"otel_source:datadog_agent"}}, diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod b/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod index 14c69501c50af..9bd6ba81918c4 100644 --- a/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod +++ b/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod @@ -22,7 +22,9 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../../../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../../../../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/metrics => ../../../../../../pkg/metrics github.com/DataDog/datadog-agent/pkg/obfuscate => ../../../../../../pkg/obfuscate @@ -65,9 +67,9 @@ require ( github.com/DataDog/datadog-agent/pkg/serializer v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/tagset v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.104.0 github.com/stretchr/testify v1.9.0 @@ -104,7 +106,9 @@ require ( github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect + 
github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/orchestrator/model v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/process/util/api v0.56.0-rc.3 // indirect @@ -203,13 +207,13 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.22.2 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.28.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.29.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/tools v0.25.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 // indirect google.golang.org/grpc v1.64.0 // indirect google.golang.org/protobuf v1.34.2 // indirect diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum b/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum index cc8e3ecec4203..ae83dc26999cd 100644 --- a/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum +++ b/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum @@ -4,14 +4,14 @@ github.com/DataDog/agent-payload/v5 v5.0.114 h1:qg3jfzz2/lOFKbFOw2yM6RM8eyMs4HlE github.com/DataDog/agent-payload/v5 v5.0.114/go.mod h1:COngtbYYCncpIPiE5D93QlXDH/3VAKk10jDNwGHcMRE= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.18.0 h1:KNiq6ofE5BBMQjl7w9fftg8z44C9z51w7qOWIKs5SCg= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.18.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 h1:FaUFQE8IuaNdpOQGIhoy2h58v8AVND+yZG3gVqKAwLQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0 h1:Fija8Qo0z/HngskYyBpMqmJKM2ejNr1NfXUyWszFDAw= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0/go.mod h1:lNu6vfFNCV/tyWxs8x8nCN1TqK+bPeI2dbnlwFTs8VA= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0 h1:x6re32f8gQ8fdCllywQyAbxQuXNrgxeimpLBfvwA97g= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0/go.mod h1:R84ZVbxKSgMxzvJro/MftVrlkGm2C2gndUhV35wyR8A= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0 h1:e4XT2+v4vgZBCbp5JUbe0Z+PRegh+nsLMp4X+esht9E= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= 
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 h1:b60rxWT/EwcSA4l/zXfqTZp3udXJ1fKtz7+Qwat8OjQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0/go.mod h1:6jM34grB+zhMrzWgM0V8B6vyIJ/75oAfjcx/mJWv6cE= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 h1:0OFAPO964qsj6BzKs/hbAkpO/IIHp7vN1klKrohzULA= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0/go.mod h1:IDaKpBfDtw8eWBLtXR14HB5dsXDxS4VRUR0OL5rlRT8= github.com/DataDog/sketches-go v1.4.4 h1:dF52vzXRFSPOj2IjXSWLvXq3jubL4CI69kwYjJ1w5Z8= github.com/DataDog/sketches-go v1.4.4/go.mod h1:XR0ns2RtEEF09mDKXiKZiQg+nfZStrq1ZuL1eezeZe0= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= @@ -394,8 +394,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -405,8 +405,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -423,8 +423,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod 
h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -459,8 +459,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= @@ -468,8 +468,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -488,8 +488,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git 
a/comp/otelcol/otlp/components/processor/infraattributesprocessor/README.md b/comp/otelcol/otlp/components/processor/infraattributesprocessor/README.md new file mode 100644 index 0000000000000..3c2859b510b64 --- /dev/null +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/README.md @@ -0,0 +1,196 @@ +# Infra Attributes Processor + +The infra attributes processor extracts [Kubernetes tags](https://docs.datadoghq.com/containers/kubernetes/tag/?tab=datadogoperator#out-of-the-box-tags) based on labels or annotations and assigns these tags as resource attributes on traces, metrics, and logs. + +When telemetry is exported from the otel-agent, these infra attributes will be converted into Datadog tags and used as metadata in [Container Monitoring](https://docs.datadoghq.com/containers/). + +## Configuration + +The infra attributes processor will be added automatically by the [converter component](../../../../converter/README.md). If you opted out of the converter, or you want to change the defaults, you can configure the processor as follows: +``` +processors: + infraattributes: + cardinality: 0 +``` + +The infra attributes processor also needs to be included in the pipelines in order to take effect: +``` +service: + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes] + exporters: [datadog/connector, datadog] + metrics: + receivers: [otlp, datadog/connector] + processors: [infraattributes] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes] + exporters: [datadog] +``` + +### Cardinality +The cardinality option sets the [TagCardinality](../../../../../../comp/core/tagger/README.md#tagcardinality) in the Datadog Agent tagger component. Possible values for this option include: +* `cardinality: 0` - **LowCardinality**: in the host count order of magnitude *(default)* +* `cardinality: 1` - **OrchestratorCardinality**: tags that change value for each pod or task +* `cardinality: 2` - **HighCardinality**: typically tags that change value for each web request, user agent, container, etc. + +## Expected Attributes + +The infra attributes processor [looks up the following resource attributes](https://github.com/DataDog/datadog-agent/blob/7d51e9e0dc9fb52aab468b372a5724eece97538c/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go#L42-L77) in order to extract Kubernetes Tags. These resource attributes can be set in your SDK or in your otel-agent collector configuration: + +| *[Entity](../../../../../../comp/core/tagger/README.md#entity-ids)* | *Resource Attributes* | |----------------------------------------------------------------------|---------------------------------------------| | workloadmeta.KindContainer | `container.id` | | workloadmeta.KindContainerImageMetadata | `container.image.id` | | workloadmeta.KindECSTask | `aws.ecs.task.arn` | | workloadmeta.KindKubernetesDeployment | `k8s.deployment.name`, `k8s.namespace.name` | | workloadmeta.KindKubernetesMetadata | `k8s.namespace.name`, `k8s.node.name` | | workloadmeta.KindKubernetesPod | `k8s.pod.uid` | | workloadmeta.KindProcess | `process.pid` | + +### SDK Configuration + +The expected resource attributes can be set by using the `OTEL_RESOURCE_ATTRIBUTES` environment variable. For example, this can be set in your Kubernetes deployment yaml: +``` +env: + ... + - name: OTEL_SERVICE_NAME + value: {{ include "calendar.fullname" . 
}} + - name: OTEL_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: OTEL_K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: OTEL_K8S_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: OTEL_K8S_POD_ID + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.uid + - name: OTEL_RESOURCE_ATTRIBUTES + value: >- + service.name=$(OTEL_SERVICE_NAME), + k8s.namespace.name=$(OTEL_K8S_NAMESPACE), + k8s.node.name=$(OTEL_K8S_NODE_NAME), + k8s.pod.name=$(OTEL_K8S_POD_NAME), + k8s.pod.uid=$(OTEL_K8S_POD_ID), + k8s.container.name={{ .Chart.Name }}, + host.name=$(OTEL_K8S_NODE_NAME), + deployment.environment=$(OTEL_K8S_NAMESPACE) +``` + +If you are using OTel SDK auto-instrumentation, `container.id` and `process.pid` will be automatically set by your SDK. + +### Collector Configuration + +The expected resource attributes can be set by configuring the [Kubernetes attributes processor and resource detection processor](https://docs.datadoghq.com/opentelemetry/collector_exporter/hostname_tagging/?tab=kubernetesdaemonset). This is demonstrated in the [k8s-values.yaml](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/examples/k8s-chart/k8s-values.yaml) example: +``` +mode: daemonset +presets: + kubernetesAttributes: + enabled: true +extraEnvs: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: OTEL_RESOURCE_ATTRIBUTES + value: "k8s.pod.ip=$(POD_IP)" +config: + processors: + k8sattributes: + passthrough: false + auth_type: "serviceAccount" + pod_association: + - sources: + - from: resource_attribute + name: k8s.pod.ip + extract: + metadata: + - k8s.pod.name + - k8s.pod.uid + - k8s.deployment.name + - k8s.node.name + - k8s.namespace.name + - k8s.pod.start_time + - k8s.replicaset.name + - k8s.replicaset.uid + - k8s.daemonset.name + - k8s.daemonset.uid + - k8s.job.name + - k8s.job.uid + - k8s.cronjob.name + - k8s.statefulset.name + - k8s.statefulset.uid + - container.image.name + - container.image.tag + - container.id + - k8s.container.name + - container.image.name + - container.image.tag + - container.id + labels: + - tag_name: kube_app_name + key: app.kubernetes.io/name + from: pod + - tag_name: kube_app_instance + key: app.kubernetes.io/instance + from: pod + - tag_name: kube_app_version + key: app.kubernetes.io/version + from: pod + - tag_name: kube_app_component + key: app.kubernetes.io/component + from: pod + - tag_name: kube_app_part_of + key: app.kubernetes.io/part-of + from: pod + - tag_name: kube_app_managed_by + key: app.kubernetes.io/managed-by + from: pod + resourcedetection: + detectors: [env, eks, ec2, system] + timeout: 2s + override: false + batch: + send_batch_max_size: 1000 + send_batch_size: 100 + timeout: 10s + exporters: + datadog: + api: + site: ${env:DD_SITE} + key: ${env:DD_API_KEY} + traces: + trace_buffer: 500 + service: + pipelines: + metrics: + receivers: [otlp] + processors: [batch, resourcedetection, k8sattributes] + exporters: [datadog] + traces: + receivers: [otlp] + processors: [batch, resourcedetection, k8sattributes] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [batch, resourcedetection, k8sattributes] + exporters: [datadog] +``` + +## List of Kubernetes Tags + +For the full list of Kubernetes Tags added by the infra attributes processor, see [comp/core/tagger/tags/tags.go](../../../../../../comp/core/tagger/tags/tags.go). 
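+
+## Example: Setting Resource Attributes from the Go SDK
+
+As a minimal, illustrative sketch only (it is not part of this processor; it assumes the upstream OpenTelemetry Go SDK packages `go.opentelemetry.io/otel/attribute`, `go.opentelemetry.io/otel/sdk/resource`, and `go.opentelemetry.io/otel/sdk/trace`, and every value below is a placeholder), the expected resource attributes from the table above can also be attached programmatically instead of via `OTEL_RESOURCE_ATTRIBUTES`:
+```
+package main
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/resource"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+// k8sResource builds a resource carrying the attributes the infra attributes
+// processor looks up; every value passed in here is a placeholder.
+func k8sResource(podUID, namespace, nodeName, containerID string) *resource.Resource {
+	return resource.NewWithAttributes(
+		"", // schema URL, intentionally left empty in this sketch
+		attribute.String("k8s.pod.uid", podUID),
+		attribute.String("k8s.namespace.name", namespace),
+		attribute.String("k8s.node.name", nodeName),
+		attribute.String("container.id", containerID),
+	)
+}
+
+func main() {
+	res := k8sResource("<pod-uid>", "<namespace>", "<node-name>", "<container-id>")
+	// Attach the resource to the tracer provider so every exported span carries these attributes.
+	_ = sdktrace.NewTracerProvider(sdktrace.WithResource(res))
+}
+```
+
+Resource attributes set this way should be resolved by the infra attributes processor in the same manner as attributes supplied through the environment variable or the collector configuration.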
diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/config_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/config_test.go index e34800eaf21df..5cf3de7117651 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/config_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/config_test.go @@ -35,7 +35,8 @@ func TestLoadingConfigStrictLogs(t *testing.T) { for _, tt := range tests { t.Run(tt.id.String(), func(t *testing.T) { tc := newTestTaggerClient() - f := NewFactory(tc) + gc := newTestGenerateIDClient().generateID + f := NewFactory(tc, gc) cfg := f.CreateDefaultConfig() sub, err := cm.Sub(tt.id.String()) diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory.go index 086c89e1bd417..429ee568fb79f 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory.go @@ -18,14 +18,18 @@ import ( var processorCapabilities = consumer.Capabilities{MutatesData: true} +// TODO: Remove tagger and generateID as dependencies to enable future import of +// infraattributesprocessor by external go packages like ocb type factory struct { - tagger taggerClient + tagger taggerClient + generateID GenerateKubeMetadataEntityID } // NewFactory returns a new factory for the InfraAttributes processor. -func NewFactory(tagger taggerClient) processor.Factory { +func NewFactory(tagger taggerClient, generateID GenerateKubeMetadataEntityID) processor.Factory { f := &factory{ - tagger: tagger, + tagger: tagger, + generateID: generateID, } return processor.NewFactory( @@ -49,7 +53,7 @@ func (f *factory) createMetricsProcessor( cfg component.Config, nextConsumer consumer.Metrics, ) (processor.Metrics, error) { - iap, err := newInfraAttributesMetricProcessor(set, cfg.(*Config), f.tagger) + iap, err := newInfraAttributesMetricProcessor(set, cfg.(*Config), f.tagger, f.generateID) if err != nil { return nil, err } @@ -68,7 +72,7 @@ func (f *factory) createLogsProcessor( cfg component.Config, nextConsumer consumer.Logs, ) (processor.Logs, error) { - iap, err := newInfraAttributesLogsProcessor(set, cfg.(*Config), f.tagger) + iap, err := newInfraAttributesLogsProcessor(set, cfg.(*Config), f.tagger, f.generateID) if err != nil { return nil, err } @@ -87,7 +91,7 @@ func (f *factory) createTracesProcessor( cfg component.Config, nextConsumer consumer.Traces, ) (processor.Traces, error) { - iap, err := newInfraAttributesSpanProcessor(set, cfg.(*Config), f.tagger) + iap, err := newInfraAttributesSpanProcessor(set, cfg.(*Config), f.tagger, f.generateID) if err != nil { return nil, err } diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory_test.go index 5ba0b9eb0ce0e..21ee70fd1874a 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory_test.go @@ -21,7 +21,8 @@ import ( func TestType(t *testing.T) { tc := newTestTaggerClient() - factory := NewFactory(tc) + gc := newTestGenerateIDClient().generateID + factory := NewFactory(tc, gc) pType := factory.Type() assert.Equal(t, pType, Type) @@ -29,7 +30,8 @@ func TestCreateDefaultConfig(t *testing.T) { tc := newTestTaggerClient() - factory 
:= NewFactory(tc) + gc := newTestGenerateIDClient().generateID + factory := NewFactory(tc, gc) cfg := factory.CreateDefaultConfig() assert.NoError(t, componenttest.CheckConfigStruct(cfg)) } @@ -52,10 +54,11 @@ func TestCreateProcessors(t *testing.T) { cm, err := confmaptest.LoadConf(filepath.Join("testdata", tt.configName)) require.NoError(t, err) tc := newTestTaggerClient() + gc := newTestGenerateIDClient().generateID for k := range cm.ToStringMap() { // Check if all processor variations that are defined in test config can be actually created - factory := NewFactory(tc) + factory := NewFactory(tc, gc) cfg := factory.CreateDefaultConfig() sub, err := cm.Sub(k) diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod new file mode 100644 index 0000000000000..9a09207d51ca7 --- /dev/null +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod @@ -0,0 +1,138 @@ +module github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor + +go 1.22.0 + +replace ( + github.com/DataDog/datadog-agent/comp/api/api/def => ../../../../../api/api/def + github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../../../../core/flare/builder + github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../../../core/flare/types + github.com/DataDog/datadog-agent/comp/core/secrets => ../../../../../core/secrets + github.com/DataDog/datadog-agent/comp/core/tagger/common => ../../../../../core/tagger/common + github.com/DataDog/datadog-agent/comp/core/tagger/types => ../../../../../core/tagger/types + github.com/DataDog/datadog-agent/comp/core/tagger/utils => ../../../../../core/tagger/utils + github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../../../core/telemetry + github.com/DataDog/datadog-agent/comp/def => ../../../../../def + github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../../../../pkg/collector/check/defaults + github.com/DataDog/datadog-agent/pkg/config/env => ../../../../../../pkg/config/env + github.com/DataDog/datadog-agent/pkg/config/model => ../../../../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../../../../pkg/config/nodetreemodel + github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../../../../pkg/config/teeconfig + github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../../../pkg/util/executable + github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../../../../pkg/util/filesystem + github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../../../pkg/util/fxutil + github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../../../pkg/util/hostname/validate + github.com/DataDog/datadog-agent/pkg/util/log => ../../../../../../pkg/util/log + github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../../../pkg/util/pointer + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../../../pkg/util/scrubber + github.com/DataDog/datadog-agent/pkg/util/system => ../../../../../../pkg/util/system + github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../../../../pkg/util/system/socket + github.com/DataDog/datadog-agent/pkg/util/tagger => ../../../../../../pkg/util/tagger + 
github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../../../../pkg/util/testutil + github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../../../pkg/util/winutil +) + +require ( + github.com/DataDog/datadog-agent/comp/core/tagger/types v0.56.0-rc.3 + github.com/stretchr/testify v1.9.0 + go.opentelemetry.io/collector/component v0.104.0 + go.opentelemetry.io/collector/confmap v0.104.0 + go.opentelemetry.io/collector/consumer v0.104.0 + go.opentelemetry.io/collector/pdata v1.11.0 + go.opentelemetry.io/collector/processor v0.104.0 + go.opentelemetry.io/collector/semconv v0.104.0 + go.opentelemetry.io/otel/metric v1.27.0 + go.opentelemetry.io/otel/trace v1.27.0 + go.uber.org/zap v1.27.0 +) + +require ( + github.com/DataDog/datadog-agent/comp/core/secrets v0.56.2 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/tagger v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.2 // indirect + github.com/DataDog/viper v1.13.5 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-viper/mapstructure/v2 v2.1.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/knadh/koanf/maps v0.1.1 // indirect + github.com/knadh/koanf/providers/confmap v0.1.0 // indirect + github.com/knadh/koanf/v2 v2.1.1 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.1 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/mapstructure v1.1.2 // indirect + 
github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.54.0 // indirect + github.com/prometheus/procfs v0.15.0 // indirect + github.com/shirou/gopsutil/v3 v3.23.12 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.opentelemetry.io/collector v0.104.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect + go.opentelemetry.io/collector/featuregate v1.11.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.104.0 // indirect + go.opentelemetry.io/collector/pdata/testdata v0.104.0 // indirect + go.opentelemetry.io/otel v1.27.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect + go.opentelemetry.io/otel/sdk v1.27.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/tools v0.25.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum new file mode 100644 index 0000000000000..4fe7706439056 --- /dev/null +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum @@ -0,0 +1,423 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= +github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.7.0 
h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w= +github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= 
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= +github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= +github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= +github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= +github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= +github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek= +github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= +github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= 
+github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 
h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opentelemetry.io/collector v0.104.0 h1:R3zjM4O3K3+ttzsjPV75P80xalxRbwYTURlK0ys7uyo= +go.opentelemetry.io/collector v0.104.0/go.mod h1:Tm6F3na9ajnOm6I5goU9dURKxq1fSBK1yA94nvUix3k= +go.opentelemetry.io/collector/component v0.104.0 h1:jqu/X9rnv8ha0RNZ1a9+x7OU49KwSMsPbOuIEykHuQE= +go.opentelemetry.io/collector/component v0.104.0/go.mod h1:1C7C0hMVSbXyY1ycCmaMUAR9fVwpgyiNQqxXtEWhVpw= +go.opentelemetry.io/collector/config/configtelemetry v0.104.0 h1:eHv98XIhapZA8MgTiipvi+FDOXoFhCYOwyKReOt+E4E= +go.opentelemetry.io/collector/config/configtelemetry v0.104.0/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40= +go.opentelemetry.io/collector/confmap v0.104.0 h1:d3yuwX+CHpoyCh0iMv3rqb/vwAekjSm4ZDL6UK1nZSA= +go.opentelemetry.io/collector/confmap v0.104.0/go.mod h1:F8Lue+tPPn2oldXcfqI75PPMJoyzgUsKVtM/uHZLA4w= +go.opentelemetry.io/collector/consumer v0.104.0 h1:Z1ZjapFp5mUcbkGEL96ljpqLIUMhRgQQpYKkDRtxy+4= +go.opentelemetry.io/collector/consumer v0.104.0/go.mod h1:60zcIb0W9GW0z9uJCv6NmjpSbCfBOeRUyrtEwqK6Hzo= +go.opentelemetry.io/collector/featuregate v1.11.0 h1:Z7puIymKoQRm3oNM/NH8reWc2zRPz2PNaJvuokh0lQY= +go.opentelemetry.io/collector/featuregate v1.11.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= +go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE= +go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE= +go.opentelemetry.io/collector/pdata/pprofile v0.104.0 h1:MYOIHvPlKEJbWLiBKFQWGD0xd2u22xGVLt4jPbdxP4Y= +go.opentelemetry.io/collector/pdata/pprofile v0.104.0/go.mod h1:7WpyHk2wJZRx70CGkBio8klrYTTXASbyIhf+rH4FKnA= +go.opentelemetry.io/collector/pdata/testdata v0.104.0 h1:BKTZ7hIyAX5DMPecrXkVB2e86HwWtJyOlXn/5vSVXNw= +go.opentelemetry.io/collector/pdata/testdata v0.104.0/go.mod h1:3SnYKu8gLfxURJMWS/cFEUFs+jEKS6jvfqKXnOZsdkQ= +go.opentelemetry.io/collector/processor v0.104.0 h1:KSvMDu4DWmK1/k2z2rOzMtTvAa00jnTabtPEK9WOSYI= +go.opentelemetry.io/collector/processor v0.104.0/go.mod h1:qU2/xCCYdvVORkN6aq0H/WUWkvo505VGYg2eOwPvaTg= +go.opentelemetry.io/collector/semconv v0.104.0 
h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k= +go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ= +go.opentelemetry.io/otel/exporters/prometheus v0.49.0/go.mod h1:KfQ1wpjf3zsHjzP149P4LyAwWRupc6c7t1ZJ9eXpKQM= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= +go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= +go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 
h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/taggerclient.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/helperclients.go similarity index 100% rename from comp/otelcol/otlp/components/processor/infraattributesprocessor/taggerclient.go rename to comp/otelcol/otlp/components/processor/infraattributesprocessor/helperclients.go diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/taggerclient_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/helperclients_test.go similarity index 74% rename from comp/otelcol/otlp/components/processor/infraattributesprocessor/taggerclient_test.go rename to comp/otelcol/otlp/components/processor/infraattributesprocessor/helperclients_test.go index 41fc7ee5ca3b8..989623475184a 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/taggerclient_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/helperclients_test.go @@ -6,7 +6,8 @@ package infraattributesprocessor import ( - "github.com/DataDog/datadog-agent/comp/core/tagger/common" + "fmt" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" ) @@ -29,5 +30,15 @@ func (t *testTaggerClient) Tag(entityID string, _ types.TagCardinality) ([]strin // GlobalTags mocks taggerimpl.GlobalTags functionality for purpose of testing, removing dependency on Taggerimpl func (t *testTaggerClient) GlobalTags(_ types.TagCardinality) ([]string, error) { - return t.tagMap[common.GetGlobalEntityID().String()], nil + return t.tagMap[types.NewEntityID("internal", "global-entity-id").String()], nil +} + +type testGenerateIDClient struct{} + +func newTestGenerateIDClient() *testGenerateIDClient { + return &testGenerateIDClient{} +} + +func (t *testGenerateIDClient) generateID(group, resource, namespace, name string) string { + return fmt.Sprintf("%s/%s/%s/%s", group, resource, namespace, name) } diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go index bd3ef1584890a..8d63b39c3754b 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go @@ -19,13 +19,15 @@ type infraAttributesLogProcessor struct { logger *zap.Logger tagger taggerClient cardinality types.TagCardinality + generateID GenerateKubeMetadataEntityID } -func newInfraAttributesLogsProcessor(set processor.Settings, cfg 
*Config, tagger taggerClient) (*infraAttributesLogProcessor, error) { +func newInfraAttributesLogsProcessor(set processor.Settings, cfg *Config, tagger taggerClient, generateID GenerateKubeMetadataEntityID) (*infraAttributesLogProcessor, error) { ialp := &infraAttributesLogProcessor{ logger: set.Logger, tagger: tagger, cardinality: cfg.Cardinality, + generateID: generateID, } set.Logger.Info("Logs Infra Attributes Processor configured") @@ -36,7 +38,7 @@ func (ialp *infraAttributesLogProcessor) processLogs(_ context.Context, ld plog. rls := ld.ResourceLogs() for i := 0; i < rls.Len(); i++ { resourceAttributes := rls.At(i).Resource().Attributes() - entityIDs := entityIDsFromAttributes(resourceAttributes) + entityIDs := entityIDsFromAttributes(resourceAttributes, ialp.generateID) tagMap := make(map[string]string) // Get all unique tags from resource attributes and global tags diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs_test.go index 054c6667d1fc1..4248ee573697e 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs_test.go @@ -14,7 +14,6 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/processor/processortest" - "github.com/DataDog/datadog-agent/comp/core/tagger/common" "github.com/DataDog/datadog-agent/comp/core/tagger/types" ) @@ -127,9 +126,10 @@ func TestInfraAttributesLogProcessor(t *testing.T) { tc := newTestTaggerClient() tc.tagMap["container_id://test"] = []string{"container:id"} tc.tagMap["deployment://namespace/deployment"] = []string{"deployment:name"} - tc.tagMap[common.GetGlobalEntityID().String()] = []string{"global:tag"} + tc.tagMap[types.NewEntityID("internal", "global-entity-id").String()] = []string{"global:tag"} + gc := newTestGenerateIDClient().generateID - factory := NewFactory(tc) + factory := NewFactory(tc, gc) flp, err := factory.CreateLogsProcessor( context.Background(), processortest.NewNopSettings(), diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go index 5dcbd2e1f974c..e7a8cb3d7a700 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go @@ -11,7 +11,6 @@ import ( "strings" "github.com/DataDog/datadog-agent/comp/core/tagger/types" - "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -24,22 +23,27 @@ type infraAttributesMetricProcessor struct { logger *zap.Logger tagger taggerClient cardinality types.TagCardinality + generateID GenerateKubeMetadataEntityID } -func newInfraAttributesMetricProcessor(set processor.Settings, cfg *Config, tagger taggerClient) (*infraAttributesMetricProcessor, error) { +func newInfraAttributesMetricProcessor(set processor.Settings, cfg *Config, tagger taggerClient, generateID GenerateKubeMetadataEntityID) (*infraAttributesMetricProcessor, error) { iamp := &infraAttributesMetricProcessor{ logger: set.Logger, tagger: tagger, cardinality: cfg.Cardinality, + generateID: generateID, } set.Logger.Info("Metric Infra Attributes Processor configured") return iamp, nil } +// GenerateKubeMetadataEntityID is a function that generates an entity ID for 
a Kubernetes resource. +type GenerateKubeMetadataEntityID func(group, resource, namespace, name string) string + // TODO: Replace OriginIDFromAttributes in opentelemetry-mapping-go with this method // entityIDsFromAttributes gets the entity IDs from resource attributes. // If not found, an empty string slice is returned. -func entityIDsFromAttributes(attrs pcommon.Map) []types.EntityID { +func entityIDsFromAttributes(attrs pcommon.Map, generateID GenerateKubeMetadataEntityID) []types.EntityID { entityIDs := make([]types.EntityID, 0, 8) // Prefixes come from pkg/util/kubernetes/kubelet and pkg/util/containers. if containerID, ok := attrs.Get(conventions.AttributeContainerID); ok { @@ -61,11 +65,11 @@ func entityIDsFromAttributes(attrs pcommon.Map) []types.EntityID { } } if namespace, ok := attrs.Get(conventions.AttributeK8SNamespaceName); ok { - entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, string(util.GenerateKubeMetadataEntityID("", "namespaces", "", namespace.AsString())))) + entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, generateID("", "namespaces", "", namespace.AsString()))) } if nodeName, ok := attrs.Get(conventions.AttributeK8SNodeName); ok { - entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, string(util.GenerateKubeMetadataEntityID("", "nodes", "", nodeName.AsString())))) + entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, generateID("", "nodes", "", nodeName.AsString()))) } if podUID, ok := attrs.Get(conventions.AttributeK8SPodUID); ok { entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesPodUID, podUID.AsString())) @@ -88,7 +92,7 @@ func (iamp *infraAttributesMetricProcessor) processMetrics(_ context.Context, md rms := md.ResourceMetrics() for i := 0; i < rms.Len(); i++ { resourceAttributes := rms.At(i).Resource().Attributes() - entityIDs := entityIDsFromAttributes(resourceAttributes) + entityIDs := entityIDsFromAttributes(resourceAttributes, iamp.generateID) tagMap := make(map[string]string) // Get all unique tags from resource attributes and global tags diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics_test.go index 023f60b904edc..01cecbaefcc62 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics_test.go @@ -16,7 +16,6 @@ import ( "go.opentelemetry.io/collector/processor/processortest" conventions "go.opentelemetry.io/collector/semconv/v1.21.0" - "github.com/DataDog/datadog-agent/comp/core/tagger/common" "github.com/DataDog/datadog-agent/comp/core/tagger/types" ) @@ -130,8 +129,10 @@ func TestInfraAttributesMetricProcessor(t *testing.T) { tc := newTestTaggerClient() tc.tagMap["container_id://test"] = []string{"container:id"} tc.tagMap["deployment://namespace/deployment"] = []string{"deployment:name"} - tc.tagMap[common.GetGlobalEntityID().String()] = []string{"global:tag"} - factory := NewFactory(tc) + tc.tagMap[types.NewEntityID("internal", "global-entity-id").String()] = []string{"global:tag"} + gc := newTestGenerateIDClient().generateID + + factory := NewFactory(tc, gc) fmp, err := factory.CreateMetricsProcessor( context.Background(), processortest.NewNopSettings(), @@ -262,10 +263,10 @@ func TestEntityIDsFromAttributes(t *testing.T) { entityIDs: []string{"process://process_pid_goes_here"}, }, } - + gc := 
newTestGenerateIDClient().generateID for _, testInstance := range tests { t.Run(testInstance.name, func(t *testing.T) { - entityIDs := entityIDsFromAttributes(testInstance.attrs) + entityIDs := entityIDsFromAttributes(testInstance.attrs, gc) entityIDsAsStrings := make([]string, len(entityIDs)) for idx, entityID := range entityIDs { entityIDsAsStrings[idx] = entityID.String() diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go index d35f7b4009208..ce6fe02674e95 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go @@ -19,13 +19,15 @@ type infraAttributesSpanProcessor struct { logger *zap.Logger tagger taggerClient cardinality types.TagCardinality + generateID GenerateKubeMetadataEntityID } -func newInfraAttributesSpanProcessor(set processor.Settings, cfg *Config, tagger taggerClient) (*infraAttributesSpanProcessor, error) { +func newInfraAttributesSpanProcessor(set processor.Settings, cfg *Config, tagger taggerClient, generateID GenerateKubeMetadataEntityID) (*infraAttributesSpanProcessor, error) { iasp := &infraAttributesSpanProcessor{ logger: set.Logger, tagger: tagger, cardinality: cfg.Cardinality, + generateID: generateID, } set.Logger.Info("Span Infra Attributes Processor configured") return iasp, nil @@ -35,7 +37,7 @@ func (iasp *infraAttributesSpanProcessor) processTraces(_ context.Context, td pt rss := td.ResourceSpans() for i := 0; i < rss.Len(); i++ { resourceAttributes := rss.At(i).Resource().Attributes() - entityIDs := entityIDsFromAttributes(resourceAttributes) + entityIDs := entityIDsFromAttributes(resourceAttributes, iasp.generateID) tagMap := make(map[string]string) // Get all unique tags from resource attributes and global tags diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces_test.go index 53cf8ad88f134..0c9a47324e1df 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces_test.go @@ -14,7 +14,6 @@ import ( "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/processor/processortest" - "github.com/DataDog/datadog-agent/comp/core/tagger/common" "github.com/DataDog/datadog-agent/comp/core/tagger/types" ) @@ -127,8 +126,9 @@ func TestInfraAttributesTraceProcessor(t *testing.T) { tc := newTestTaggerClient() tc.tagMap["container_id://test"] = []string{"container:id"} tc.tagMap["deployment://namespace/deployment"] = []string{"deployment:name"} - tc.tagMap[common.GetGlobalEntityID().String()] = []string{"global:tag"} - factory := NewFactory(tc) + tc.tagMap[types.NewEntityID("internal", "global-entity-id").String()] = []string{"global:tag"} + gc := newTestGenerateIDClient().generateID + factory := NewFactory(tc, gc) fmp, err := factory.CreateTracesProcessor( context.Background(), processortest.NewNopSettings(), diff --git a/comp/otelcol/otlp/components/statsprocessor/go.mod b/comp/otelcol/otlp/components/statsprocessor/go.mod index b21dae11b5ade..bec1bac721a17 100644 --- a/comp/otelcol/otlp/components/statsprocessor/go.mod +++ b/comp/otelcol/otlp/components/statsprocessor/go.mod @@ -23,7 +23,7 @@ require ( github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/trace 
v0.56.0-rc.3 github.com/DataDog/datadog-go/v5 v5.5.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.104.0 go.opentelemetry.io/collector/pdata v1.11.0 @@ -38,7 +38,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect - github.com/DataDog/go-sqllexer v0.0.14 // indirect + github.com/DataDog/go-sqllexer v0.0.15 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/sketches-go v1.4.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect diff --git a/comp/otelcol/otlp/components/statsprocessor/go.sum b/comp/otelcol/otlp/components/statsprocessor/go.sum index a928af42bb225..a08668d36b2f8 100644 --- a/comp/otelcol/otlp/components/statsprocessor/go.sum +++ b/comp/otelcol/otlp/components/statsprocessor/go.sum @@ -1,11 +1,11 @@ github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q= -github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.15 h1:rUUu52dP8EQhJLnUw0MIAxZp0BQx2fOTuMztr3vtHUU= +github.com/DataDog/go-sqllexer v0.0.15/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 h1:ZI8u3CgdMXpDplrf9/gIr13+/g/tUzUcBMk2ZhXgzLE= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= github.com/DataDog/sketches-go v1.4.2 h1:gppNudE9d19cQ98RYABOetxIhpTCl4m7CnbRZjvVA/o= github.com/DataDog/sketches-go v1.4.2/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= diff --git a/comp/otelcol/otlp/config_test.go b/comp/otelcol/otlp/config_test.go index 593c27e495cfc..faa2223c8c661 100644 --- a/comp/otelcol/otlp/config_test.go +++ b/comp/otelcol/otlp/config_test.go @@ -304,7 +304,7 @@ func TestFromEnvironmentVariables(t *testing.T) { name: "only gRPC, disabled logging", env: map[string]string{ "DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_GRPC_ENDPOINT": "0.0.0.0:9999", - "DD_OTLP_CONFIG_DEBUG_LOGLEVEL": "disabled", + "DD_OTLP_CONFIG_DEBUG_VERBOSITY": "none", }, cfg: PipelineConfig{ OTLPReceiverConfig: map[string]interface{}{ @@ -325,7 +325,7 @@ func TestFromEnvironmentVariables(t *testing.T) { "apm_stats_receiver_addr": "http://localhost:8126/v0.6/stats", }, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, }, @@ -475,7 +475,7 @@ func TestFromAgentConfigMetrics(t *testing.T) { "tags": "tag1:value1,tag2:value2", }, Debug: map[string]interface{}{ - "loglevel": "debug", 
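// NOTE (editor's illustration, not part of the original change): the deprecated
// OTLP "logging" exporter was configured through a loglevel, while its
// replacement, the collector's "debug" exporter, takes a verbosity setting.
// Judging from the test updates in this diff, the legacy values map roughly as
// below; the helper is a hypothetical sketch for clarity, not Agent code.
var loglevelToVerbosity = map[string]string{
	"disabled": "none",
	"warn":     "basic",
	"info":     "normal",
	"debug":    "detailed",
}

// translateDebugConfig returns a copy of a legacy otlp_config debug section
// with "loglevel" translated to "verbosity"; unrecognized values are kept
// as-is so they can still be rejected downstream (the tests expect invalid
// verbosity values to be ignored).
func translateDebugConfig(debug map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, len(debug))
	for k, v := range debug {
		if k == "loglevel" {
			if s, ok := v.(string); ok {
				if verbosity, found := loglevelToVerbosity[s]; found {
					out["verbosity"] = verbosity
					continue
				}
			}
		}
		out[k] = v
	}
	return out
}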
+ "verbosity": "detailed", }, }, }, @@ -520,7 +520,7 @@ func TestFromAgentConfigDebug(t *testing.T) { }, }, { - path: "debug/loglevel_debug.yaml", + path: "debug/verbosity_detailed.yaml", shouldSet: true, cfg: PipelineConfig{ OTLPReceiverConfig: map[string]interface{}{}, @@ -529,7 +529,7 @@ func TestFromAgentConfigDebug(t *testing.T) { MetricsEnabled: true, TracesEnabled: true, LogsEnabled: false, - Debug: map[string]interface{}{"loglevel": "debug"}, + Debug: map[string]interface{}{"verbosity": "detailed"}, Metrics: map[string]interface{}{ "enabled": true, "tag_cardinality": "low", @@ -538,7 +538,7 @@ func TestFromAgentConfigDebug(t *testing.T) { }, }, { - path: "debug/loglevel_disabled.yaml", + path: "debug/verbosity_none.yaml", shouldSet: false, cfg: PipelineConfig{ OTLPReceiverConfig: map[string]interface{}{}, @@ -547,7 +547,7 @@ func TestFromAgentConfigDebug(t *testing.T) { MetricsEnabled: true, TracesEnabled: true, LogsEnabled: false, - Debug: map[string]interface{}{"loglevel": "disabled"}, + Debug: map[string]interface{}{"verbosity": "none"}, Metrics: map[string]interface{}{ "enabled": true, "tag_cardinality": "low", diff --git a/comp/otelcol/otlp/integrationtest/integration_test.go b/comp/otelcol/otlp/integrationtest/integration_test.go index e073c29ba1f3c..d80e75f2ad6d7 100644 --- a/comp/otelcol/otlp/integrationtest/integration_test.go +++ b/comp/otelcol/otlp/integrationtest/integration_test.go @@ -54,7 +54,6 @@ import ( collectorcontribFx "github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/fx" collectordef "github.com/DataDog/datadog-agent/comp/otelcol/collector/def" collectorfx "github.com/DataDog/datadog-agent/comp/otelcol/collector/fx" - configstorefx "github.com/DataDog/datadog-agent/comp/otelcol/configstore/fx" converter "github.com/DataDog/datadog-agent/comp/otelcol/converter/def" converterfx "github.com/DataDog/datadog-agent/comp/otelcol/converter/fx" "github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline" @@ -96,7 +95,6 @@ func runTestOTelAgent(ctx context.Context, params *subcommands.GlobalParams) err fx.Provide(func(cp converter.Component) confmap.Converter { return cp }), - configstorefx.Module(), fx.Provide(func() (coreconfig.Component, error) { c, err := agentConfig.NewConfigComponent(context.Background(), "", params.ConfPaths) if err != nil { diff --git a/comp/otelcol/otlp/map_provider.go b/comp/otelcol/otlp/map_provider.go index ed443ff15ca56..0c65bcc0fa99d 100644 --- a/comp/otelcol/otlp/map_provider.go +++ b/comp/otelcol/otlp/map_provider.go @@ -94,31 +94,31 @@ func buildMap(cfg PipelineConfig) (*confmap.Conf, error) { if cfg.shouldSetLoggingSection() { m := map[string]interface{}{ "exporters": map[string]interface{}{ - "logging": cfg.Debug, + "debug": cfg.Debug, }, } if cfg.MetricsEnabled { key := buildKey("service", "pipelines", "metrics", "exporters") if v, ok := retMap.Get(key).([]interface{}); ok { - m[key] = append(v, "logging") + m[key] = append(v, "debug") } else { - m[key] = []interface{}{"logging"} + m[key] = []interface{}{"debug"} } } if cfg.TracesEnabled { key := buildKey("service", "pipelines", "traces", "exporters") if v, ok := retMap.Get(key).([]interface{}); ok { - m[key] = append(v, "logging") + m[key] = append(v, "debug") } else { - m[key] = []interface{}{"logging"} + m[key] = []interface{}{"debug"} } } if cfg.LogsEnabled { key := buildKey("service", "pipelines", "logs", "exporters") if v, ok := retMap.Get(key).([]interface{}); ok { - m[key] = append(v, "logging") + m[key] = append(v, "debug") } else { - m[key] = 
[]interface{}{"logging"} + m[key] = []interface{}{"debug"} } } errs = append(errs, retMap.Merge(confmap.NewFromStringMap(m))) diff --git a/comp/otelcol/otlp/map_provider_not_serverless_test.go b/comp/otelcol/otlp/map_provider_not_serverless_test.go index 9b9624e405f06..4ee1a08a38efe 100644 --- a/comp/otelcol/otlp/map_provider_not_serverless_test.go +++ b/comp/otelcol/otlp/map_provider_not_serverless_test.go @@ -37,7 +37,7 @@ func TestNewMap(t *testing.T) { TracePort: 5003, TracesEnabled: true, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -91,7 +91,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -150,7 +150,7 @@ func TestNewMap(t *testing.T) { }, }, { - name: "only HTTP, metrics and traces, invalid loglevel(ignored)", + name: "only HTTP, metrics and traces, invalid verbosity (ignored)", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -167,7 +167,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "foo", + "verbosity": "foo", }, }, ocfg: map[string]interface{}{ @@ -232,7 +232,7 @@ func TestNewMap(t *testing.T) { TracePort: 5003, TracesEnabled: true, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -288,7 +288,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -333,13 +333,13 @@ func TestNewMap(t *testing.T) { }, }, { - name: "only gRPC, only Traces, logging info", + name: "only gRPC, only Traces, logging with normal verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 1234, 0), TracePort: 5003, TracesEnabled: true, Debug: map[string]interface{}{ - "loglevel": "info", + "verbosity": "normal", }, }, ocfg: map[string]interface{}{ @@ -363,8 +363,8 @@ func TestNewMap(t *testing.T) { "enabled": false, }, }, - "logging": map[string]interface{}{ - "loglevel": "info", + "debug": map[string]interface{}{ + "verbosity": "normal", }, }, "service": map[string]interface{}{ @@ -372,14 +372,14 @@ func TestNewMap(t *testing.T) { "pipelines": map[string]interface{}{ "traces": map[string]interface{}{ "receivers": []interface{}{"otlp"}, - "exporters": []interface{}{"otlp", "logging"}, + "exporters": []interface{}{"otlp", "debug"}, }, }, }, }, }, { - name: "only HTTP, only metrics, logging debug", + name: "only HTTP, only metrics, logging with detailed verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -394,7 +394,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "debug", + "verbosity": "detailed", }, }, ocfg: map[string]interface{}{ @@ -424,8 +424,8 @@ func TestNewMap(t *testing.T) { }, }, }, - "logging": map[string]interface{}{ - "loglevel": "debug", + "debug": map[string]interface{}{ + "verbosity": "detailed", }, }, "service": map[string]interface{}{ @@ -434,14 +434,14 @@ func TestNewMap(t *testing.T) { "metrics": map[string]interface{}{ "receivers": []interface{}{"otlp"}, "processors": []interface{}{"batch"}, - "exporters": []interface{}{"serializer", "logging"}, + "exporters": []interface{}{"serializer", "debug"}, }, }, }, }, }, { - name: "only HTTP, metrics and traces, logging warn", + name: 
"only HTTP, metrics and traces, logging with basic verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -457,7 +457,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "warn", + "verbosity": "basic", }, }, ocfg: map[string]interface{}{ @@ -497,8 +497,8 @@ func TestNewMap(t *testing.T) { }, }, }, - "logging": map[string]interface{}{ - "loglevel": "warn", + "debug": map[string]interface{}{ + "verbosity": "basic", }, }, "service": map[string]interface{}{ @@ -506,12 +506,12 @@ func TestNewMap(t *testing.T) { "pipelines": map[string]interface{}{ "traces": map[string]interface{}{ "receivers": []interface{}{"otlp"}, - "exporters": []interface{}{"otlp", "logging"}, + "exporters": []interface{}{"otlp", "debug"}, }, "metrics": map[string]interface{}{ "receivers": []interface{}{"otlp"}, "processors": []interface{}{"batch"}, - "exporters": []interface{}{"serializer", "logging"}, + "exporters": []interface{}{"serializer", "debug"}, }, }, }, @@ -525,7 +525,7 @@ func TestNewMap(t *testing.T) { TracesEnabled: true, LogsEnabled: true, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -592,7 +592,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -658,7 +658,7 @@ func TestNewMap(t *testing.T) { }, }, { - name: "only HTTP; metrics, logs and traces; invalid loglevel(ignored)", + name: "only HTTP; metrics, logs and traces; invalid verbosity (ignored)", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -676,7 +676,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "foo", + "verbosity": "foo", }, }, ocfg: map[string]interface{}{ @@ -749,7 +749,7 @@ func TestNewMap(t *testing.T) { TracesEnabled: true, LogsEnabled: true, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -818,7 +818,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -870,14 +870,14 @@ func TestNewMap(t *testing.T) { }, }, { - name: "only gRPC, traces and logs, logging info", + name: "only gRPC, traces and logs, logging with normal verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 1234, 0), TracePort: 5003, TracesEnabled: true, LogsEnabled: true, Debug: map[string]interface{}{ - "loglevel": "info", + "verbosity": "normal", }, }, ocfg: map[string]interface{}{ @@ -907,8 +907,8 @@ func TestNewMap(t *testing.T) { "enabled": false, }, }, - "logging": map[string]interface{}{ - "loglevel": "info", + "debug": map[string]interface{}{ + "verbosity": "normal", }, "logsagent": interface{}(nil), }, @@ -917,19 +917,19 @@ func TestNewMap(t *testing.T) { "pipelines": map[string]interface{}{ "traces": map[string]interface{}{ "receivers": []interface{}{"otlp"}, - "exporters": []interface{}{"otlp", "logging"}, + "exporters": []interface{}{"otlp", "debug"}, }, "logs": map[string]interface{}{ "receivers": []interface{}{"otlp"}, "processors": []interface{}{"infraattributes", "batch"}, - "exporters": []interface{}{"logsagent", "logging"}, + "exporters": []interface{}{"logsagent", "debug"}, }, }, }, }, }, { - name: "only HTTP, metrics and logs, logging debug", + name: 
"only HTTP, metrics and logs, logging with detailed verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -945,7 +945,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "debug", + "verbosity": "detailed", }, }, ocfg: map[string]interface{}{ @@ -976,8 +976,8 @@ func TestNewMap(t *testing.T) { }, }, }, - "logging": map[string]interface{}{ - "loglevel": "debug", + "debug": map[string]interface{}{ + "verbosity": "detailed", }, "logsagent": interface{}(nil), }, @@ -987,19 +987,19 @@ func TestNewMap(t *testing.T) { "metrics": map[string]interface{}{ "receivers": []interface{}{"otlp"}, "processors": []interface{}{"batch"}, - "exporters": []interface{}{"serializer", "logging"}, + "exporters": []interface{}{"serializer", "debug"}, }, "logs": map[string]interface{}{ "receivers": []interface{}{"otlp"}, "processors": []interface{}{"infraattributes", "batch"}, - "exporters": []interface{}{"logsagent", "logging"}, + "exporters": []interface{}{"logsagent", "debug"}, }, }, }, }, }, { - name: "only HTTP; metrics, traces, and logs; logging warn", + name: "only HTTP; metrics, traces, and logs; logging with basic verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -1016,7 +1016,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "warn", + "verbosity": "basic", }, }, ocfg: map[string]interface{}{ @@ -1057,8 +1057,8 @@ func TestNewMap(t *testing.T) { }, }, }, - "logging": map[string]interface{}{ - "loglevel": "warn", + "debug": map[string]interface{}{ + "verbosity": "basic", }, "logsagent": interface{}(nil), }, @@ -1067,17 +1067,17 @@ func TestNewMap(t *testing.T) { "pipelines": map[string]interface{}{ "traces": map[string]interface{}{ "receivers": []interface{}{"otlp"}, - "exporters": []interface{}{"otlp", "logging"}, + "exporters": []interface{}{"otlp", "debug"}, }, "metrics": map[string]interface{}{ "receivers": []interface{}{"otlp"}, "processors": []interface{}{"batch"}, - "exporters": []interface{}{"serializer", "logging"}, + "exporters": []interface{}{"serializer", "debug"}, }, "logs": map[string]interface{}{ "receivers": []interface{}{"otlp"}, "processors": []interface{}{"infraattributes", "batch"}, - "exporters": []interface{}{"logsagent", "logging"}, + "exporters": []interface{}{"logsagent", "debug"}, }, }, }, diff --git a/comp/otelcol/otlp/map_provider_serverless_test.go b/comp/otelcol/otlp/map_provider_serverless_test.go index 7e1721cd33883..99f58c78a1660 100644 --- a/comp/otelcol/otlp/map_provider_serverless_test.go +++ b/comp/otelcol/otlp/map_provider_serverless_test.go @@ -31,7 +31,7 @@ func TestNewMap(t *testing.T) { TracePort: 5003, TracesEnabled: true, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -85,7 +85,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -138,7 +138,7 @@ func TestNewMap(t *testing.T) { }, }, { - name: "only HTTP, metrics and traces, invalid loglevel(ignored)", + name: "only HTTP, metrics and traces, invalid verbosity (ignored)", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -155,7 +155,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "foo", + "verbosity": "foo", }, }, ocfg: 
map[string]interface{}{ @@ -214,7 +214,7 @@ func TestNewMap(t *testing.T) { TracePort: 5003, TracesEnabled: true, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -270,7 +270,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "disabled", + "verbosity": "none", }, }, ocfg: map[string]interface{}{ @@ -309,13 +309,13 @@ func TestNewMap(t *testing.T) { }, }, { - name: "only gRPC, only Traces, logging info", + name: "only gRPC, only Traces, logging with normal verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 1234, 0), TracePort: 5003, TracesEnabled: true, Debug: map[string]interface{}{ - "loglevel": "info", + "verbosity": "normal", }, }, ocfg: map[string]interface{}{ @@ -339,8 +339,8 @@ func TestNewMap(t *testing.T) { "enabled": false, }, }, - "logging": map[string]interface{}{ - "loglevel": "info", + "debug": map[string]interface{}{ + "verbosity": "normal", }, }, "service": map[string]interface{}{ @@ -348,14 +348,14 @@ func TestNewMap(t *testing.T) { "pipelines": map[string]interface{}{ "traces": map[string]interface{}{ "receivers": []interface{}{"otlp"}, - "exporters": []interface{}{"otlp", "logging"}, + "exporters": []interface{}{"otlp", "debug"}, }, }, }, }, }, { - name: "only HTTP, only metrics, logging debug", + name: "only HTTP, only metrics, logging with detailed verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -370,7 +370,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "debug", + "verbosity": "detailed", }, }, ocfg: map[string]interface{}{ @@ -395,8 +395,8 @@ func TestNewMap(t *testing.T) { }, }, }, - "logging": map[string]interface{}{ - "loglevel": "debug", + "debug": map[string]interface{}{ + "verbosity": "detailed", }, }, "service": map[string]interface{}{ @@ -404,14 +404,14 @@ func TestNewMap(t *testing.T) { "pipelines": map[string]interface{}{ "metrics": map[string]interface{}{ "receivers": []interface{}{"otlp"}, - "exporters": []interface{}{"serializer", "logging"}, + "exporters": []interface{}{"serializer", "debug"}, }, }, }, }, }, { - name: "only HTTP, metrics and traces, logging warn", + name: "only HTTP, metrics and traces, logging with basic verbosity", pcfg: PipelineConfig{ OTLPReceiverConfig: testutil.OTLPConfigFromPorts("bindhost", 0, 1234), TracePort: 5003, @@ -427,7 +427,7 @@ func TestNewMap(t *testing.T) { }, }, Debug: map[string]interface{}{ - "loglevel": "warn", + "verbosity": "basic", }, }, ocfg: map[string]interface{}{ @@ -462,8 +462,8 @@ func TestNewMap(t *testing.T) { }, }, }, - "logging": map[string]interface{}{ - "loglevel": "warn", + "debug": map[string]interface{}{ + "verbosity": "basic", }, }, "service": map[string]interface{}{ @@ -471,11 +471,11 @@ func TestNewMap(t *testing.T) { "pipelines": map[string]interface{}{ "traces": map[string]interface{}{ "receivers": []interface{}{"otlp"}, - "exporters": []interface{}{"otlp", "logging"}, + "exporters": []interface{}{"otlp", "debug"}, }, "metrics": map[string]interface{}{ "receivers": []interface{}{"otlp"}, - "exporters": []interface{}{"serializer", "logging"}, + "exporters": []interface{}{"serializer", "debug"}, }, }, }, diff --git a/comp/otelcol/otlp/no_otlp.go b/comp/otelcol/otlp/no_otlp.go index 5f3f5b2ab0929..0800b404fc178 100644 --- a/comp/otelcol/otlp/no_otlp.go +++ b/comp/otelcol/otlp/no_otlp.go @@ -8,11 +8,11 @@ package otlp import ( - 
"github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) // IsEnabled checks if OTLP pipeline is enabled in a given config. -func IsEnabled(_ config.Reader) bool { +func IsEnabled(_ model.Reader) bool { return false } diff --git a/comp/otelcol/otlp/testdata/debug/loglevel_disabled.yaml b/comp/otelcol/otlp/testdata/debug/loglevel_disabled.yaml deleted file mode 100644 index 92576a13e2c0a..0000000000000 --- a/comp/otelcol/otlp/testdata/debug/loglevel_disabled.yaml +++ /dev/null @@ -1,3 +0,0 @@ -otlp_config: - debug: - loglevel: disabled diff --git a/comp/otelcol/otlp/testdata/debug/verbosity_detailed.yaml b/comp/otelcol/otlp/testdata/debug/verbosity_detailed.yaml new file mode 100644 index 0000000000000..ff66512c59706 --- /dev/null +++ b/comp/otelcol/otlp/testdata/debug/verbosity_detailed.yaml @@ -0,0 +1,3 @@ +otlp_config: + debug: + verbosity: detailed diff --git a/comp/otelcol/otlp/testdata/debug/loglevel_debug.yaml b/comp/otelcol/otlp/testdata/debug/verbosity_none.yaml similarity index 52% rename from comp/otelcol/otlp/testdata/debug/loglevel_debug.yaml rename to comp/otelcol/otlp/testdata/debug/verbosity_none.yaml index 48df64859c790..bd281f2225170 100644 --- a/comp/otelcol/otlp/testdata/debug/loglevel_debug.yaml +++ b/comp/otelcol/otlp/testdata/debug/verbosity_none.yaml @@ -1,3 +1,3 @@ otlp_config: debug: - loglevel: debug + verbosity: none diff --git a/comp/otelcol/otlp/testdata/metrics/allconfig.yaml b/comp/otelcol/otlp/testdata/metrics/allconfig.yaml index 9982e9731a587..3d7b7849259ce 100644 --- a/comp/otelcol/otlp/testdata/metrics/allconfig.yaml +++ b/comp/otelcol/otlp/testdata/metrics/allconfig.yaml @@ -17,4 +17,4 @@ otlp_config: send_count_sum_metrics: true send_aggregation_metrics: true debug: - loglevel: debug + verbosity: detailed diff --git a/comp/otelcol/otlp/testutil/go.mod b/comp/otelcol/otlp/testutil/go.mod index cd00e6506c335..96f120bceec3d 100644 --- a/comp/otelcol/otlp/testutil/go.mod +++ b/comp/otelcol/otlp/testutil/go.mod @@ -12,7 +12,9 @@ replace ( github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../../pkg/collector/check/defaults github.com/DataDog/datadog-agent/pkg/config/env => ../../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/model => ../../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/proto => ../../../../pkg/proto github.com/DataDog/datadog-agent/pkg/telemetry => ../../../../pkg/telemetry github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../pkg/util/executable @@ -33,8 +35,8 @@ require ( github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/proto v0.55.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.17.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 github.com/DataDog/sketches-go v1.4.6 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/pdata v1.9.0 @@ -45,6 +47,8 @@ require ( github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect 
github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect @@ -83,7 +87,7 @@ require ( github.com/prometheus/procfs v0.15.1 // indirect github.com/shirou/gopsutil/v3 v3.24.4 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -94,9 +98,9 @@ require ( github.com/yusufpapurcu/wmi v1.2.4 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect golang.org/x/net v0.28.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 // indirect google.golang.org/grpc v1.64.0 // indirect diff --git a/comp/otelcol/otlp/testutil/go.sum b/comp/otelcol/otlp/testutil/go.sum index b4d06b630c428..eaf41184c478e 100644 --- a/comp/otelcol/otlp/testutil/go.sum +++ b/comp/otelcol/otlp/testutil/go.sum @@ -1,9 +1,9 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 h1:weAPKDi/dTlBbWU4oDZ55ubomqUob6OWPoUcdBjWM2M= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0/go.mod h1:VrcmO2+HTWXaGYin1pAAXWNEtaza/DCJDH/+t5IY5rs= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.17.0 h1:1FGr7e8wAebpvpoabdQcRt5WtPCJ2W2kDPzLfOb07/c= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.17.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 h1:jdsuH8u4rxfvy3ZHoSLk5NAZrQMNZqyJwhM15FpEswE= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0/go.mod h1:KI5I5JhJNOQWeE4vs+qk+BY/9PVSDwNmSjrCUrmuZKw= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= @@ -198,8 +198,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 
v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -286,8 +287,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -345,8 +346,8 @@ golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= diff --git a/comp/process/agent/agentimpl/agent.go b/comp/process/agent/agentimpl/agent.go index 4eb469a1540e4..ff9e5d80eae79 100644 --- a/comp/process/agent/agentimpl/agent.go +++ b/comp/process/agent/agentimpl/agent.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/process/runner" submitterComp "github.com/DataDog/datadog-agent/comp/process/submitter" "github.com/DataDog/datadog-agent/comp/process/types" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/checks" processStatsd "github.com/DataDog/datadog-agent/pkg/process/statsd" "github.com/DataDog/datadog-agent/pkg/util/flavor" @@ -100,7 +100,7 @@ func newProcessAgent(deps 
dependencies) (provides, error) { }, nil } - if err := processStatsd.Configure(ddconfig.GetBindHost(), deps.Config.GetInt("dogstatsd_port"), deps.Statsd.CreateForHostPort); err != nil { + if err := processStatsd.Configure(pkgconfigsetup.GetBindHost(pkgconfigsetup.Datadog()), deps.Config.GetInt("dogstatsd_port"), deps.Statsd.CreateForHostPort); err != nil { deps.Log.Criticalf("Error configuring statsd for process-agent: %s", err) return provides{ Comp: processAgent{ diff --git a/comp/process/agent/status.go b/comp/process/agent/status.go index d536af75aeca4..d7666144e7292 100644 --- a/comp/process/agent/status.go +++ b/comp/process/agent/status.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/status" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" processStatus "github.com/DataDog/datadog-agent/pkg/process/util/status" ) @@ -63,7 +63,7 @@ func (s StatusProvider) populateStatus() map[string]interface{} { } else { // Get expVar server address - ipcAddr, err := ddconfig.GetIPCAddress() + ipcAddr, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { status["error"] = fmt.Sprintf("%v", err.Error()) return status diff --git a/comp/process/apiserver/apiserver.go b/comp/process/apiserver/apiserver.go index 66cf2b1d66851..c216b2a93fef4 100644 --- a/comp/process/apiserver/apiserver.go +++ b/comp/process/apiserver/apiserver.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/process-agent/api" log "github.com/DataDog/datadog-agent/comp/core/log/def" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) var _ Component = (*apiserver)(nil) @@ -40,12 +40,12 @@ func newApiServer(deps dependencies) Component { r := mux.NewRouter() api.SetupAPIServerHandlers(deps.APIServerDeps, r) // Set up routes - addr, err := ddconfig.GetProcessAPIAddressPort() + addr, err := pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) if err != nil { return err } deps.Log.Infof("API server listening on %s", addr) - timeout := time.Duration(ddconfig.Datadog().GetInt("server_timeout")) * time.Second + timeout := time.Duration(pkgconfigsetup.Datadog().GetInt("server_timeout")) * time.Second apiserver := &apiserver{ server: &http.Server{ diff --git a/comp/process/expvars/expvarsimpl/expvars.go b/comp/process/expvars/expvarsimpl/expvars.go index 9125e7245d1b0..7d30b58185aee 100644 --- a/comp/process/expvars/expvarsimpl/expvars.go +++ b/comp/process/expvars/expvarsimpl/expvars.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/process/expvars" "github.com/DataDog/datadog-agent/comp/process/hostinfo" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/runner/endpoint" "github.com/DataDog/datadog-agent/pkg/process/status" "github.com/DataDog/datadog-agent/pkg/process/util" @@ -91,8 +91,8 @@ func newExpvarServer(deps dependencies) (expvars.Component, error) { func getExpvarPort(deps dependencies) int { expVarPort := deps.Config.GetInt("process_config.expvar_port") if expVarPort <= 0 { - _ = deps.Log.Warnf("Invalid process_config.expvar_port -- %d, using default port %d", expVarPort, ddconfig.DefaultProcessExpVarPort) - expVarPort = ddconfig.DefaultProcessExpVarPort + _ 
= deps.Log.Warnf("Invalid process_config.expvar_port -- %d, using default port %d", expVarPort, pkgconfigsetup.DefaultProcessExpVarPort) + expVarPort = pkgconfigsetup.DefaultProcessExpVarPort } return expVarPort } diff --git a/comp/process/forwarders/forwardersimpl/forwarders.go b/comp/process/forwarders/forwardersimpl/forwarders.go index 2e81a8b290d8f..2e26c4c9f0c79 100644 --- a/comp/process/forwarders/forwardersimpl/forwarders.go +++ b/comp/process/forwarders/forwardersimpl/forwarders.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/resolver" "github.com/DataDog/datadog-agent/comp/process/forwarders" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/runner/endpoint" apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -46,8 +46,8 @@ func newForwarders(deps dependencies) (forwarders.Component, error) { config := deps.Config queueBytes := config.GetInt("process_config.process_queue_bytes") if queueBytes <= 0 { - deps.Logger.Warnf("Invalid queue bytes size: %d. Using default value: %d", queueBytes, ddconfig.DefaultProcessQueueBytes) - queueBytes = ddconfig.DefaultProcessQueueBytes + deps.Logger.Warnf("Invalid queue bytes size: %d. Using default value: %d", queueBytes, pkgconfigsetup.DefaultProcessQueueBytes) + queueBytes = pkgconfigsetup.DefaultProcessQueueBytes } eventsAPIEndpoints, err := endpoint.GetEventsAPIEndpoints(config) @@ -73,7 +73,7 @@ func newForwarders(deps dependencies) (forwarders.Component, error) { } func createForwarder(deps dependencies, options *defaultforwarder.Options) defaultforwarder.Component { - return defaultforwarder.NewForwarder(deps.Config, deps.Logger, deps.Lc, false, options, false).Comp + return defaultforwarder.NewForwarder(deps.Config, deps.Logger, deps.Lc, false, options).Comp } func createParams(config config.Component, log log.Component, queueBytes int, endpoints []apicfg.Endpoint) *defaultforwarder.Options { diff --git a/comp/process/profiler/profilerimpl/profiler.go b/comp/process/profiler/profilerimpl/profiler.go index 1c6b53d884d38..bdcaa28f2bf71 100644 --- a/comp/process/profiler/profilerimpl/profiler.go +++ b/comp/process/profiler/profilerimpl/profiler.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" profilecomp "github.com/DataDog/datadog-agent/comp/process/profiler" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/profiling" @@ -71,7 +71,7 @@ func getProfilingSettings(cfg config.Component) profiling.Settings { if site == "" { s := cfg.GetString("site") if s == "" { - s = ddconfig.DefaultSite + s = pkgconfigsetup.DefaultSite } site = fmt.Sprintf(profiling.ProfilingURLTemplate, s) } diff --git a/comp/process/status/statusimpl/status.go b/comp/process/status/statusimpl/status.go index f7e8f8b307a3a..7002745a9f6ed 100644 --- a/comp/process/status/statusimpl/status.go +++ b/comp/process/status/statusimpl/status.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/status" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + 
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" processStatus "github.com/DataDog/datadog-agent/pkg/process/util/status" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -84,7 +84,7 @@ func (s statusProvider) populateStatus() map[string]interface{} { } else { // Get expVar server address - ipcAddr, err := ddconfig.GetIPCAddress() + ipcAddr, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { status["error"] = fmt.Sprintf("%v", err.Error()) return status @@ -92,7 +92,7 @@ func (s statusProvider) populateStatus() map[string]interface{} { port := s.config.GetInt("process_config.expvar_port") if port <= 0 { - port = ddconfig.DefaultProcessExpVarPort + port = pkgconfigsetup.DefaultProcessExpVarPort } url = fmt.Sprintf("http://%s:%d/debug/vars", ipcAddr, port) } diff --git a/comp/remote-config/rcclient/rcclientimpl/rcclient.go b/comp/remote-config/rcclient/rcclientimpl/rcclient.go index 693c7d42e1971..e4dd131dea804 100644 --- a/comp/remote-config/rcclient/rcclientimpl/rcclient.go +++ b/comp/remote-config/rcclient/rcclientimpl/rcclient.go @@ -21,10 +21,10 @@ import ( "github.com/DataDog/datadog-agent/comp/remote-config/rcclient" "github.com/DataDog/datadog-agent/comp/remote-config/rcclient/types" "github.com/DataDog/datadog-agent/pkg/api/security" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/config/remote/client" "github.com/DataDog/datadog-agent/pkg/config/remote/data" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/util/fxutil" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" @@ -71,7 +71,7 @@ type dependencies struct { // components that are instantiated last). Remote configuration client is a good candidate for this since it must be // able to interact with any other components (i.e. be at the end of the dependency graph). 
func newRemoteConfigClient(deps dependencies) (rcclient.Component, error) { - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } @@ -89,8 +89,8 @@ func newRemoteConfigClient(deps dependencies) (rcclient.Component, error) { // We have to create the client in the constructor and set its name later c, err := client.NewUnverifiedGRPCClient( ipcAddress, - config.GetIPCPort(), - func() (string, error) { return security.FetchAuthToken(config.Datadog()) }, + pkgconfigsetup.GetIPCPort(), + func() (string, error) { return security.FetchAuthToken(pkgconfigsetup.Datadog()) }, optsWithDefault..., ) if err != nil { @@ -98,11 +98,11 @@ func newRemoteConfigClient(deps dependencies) (rcclient.Component, error) { } var clientMRF *client.Client - if config.Datadog().GetBool("multi_region_failover.enabled") { + if pkgconfigsetup.Datadog().GetBool("multi_region_failover.enabled") { clientMRF, err = client.NewUnverifiedMRFGRPCClient( ipcAddress, - config.GetIPCPort(), - func() (string, error) { return security.FetchAuthToken(config.Datadog()) }, + pkgconfigsetup.GetIPCPort(), + func() (string, error) { return security.FetchAuthToken(pkgconfigsetup.Datadog()) }, optsWithDefault..., ) if err != nil { @@ -119,7 +119,7 @@ func newRemoteConfigClient(deps dependencies) (rcclient.Component, error) { settingsComponent: deps.SettingsComponent, } - if config.IsRemoteConfigEnabled(config.Datadog()) { + if pkgconfigsetup.IsRemoteConfigEnabled(pkgconfigsetup.Datadog()) { deps.Lc.Append(fx.Hook{ OnStart: func(context.Context) error { rc.start() @@ -160,19 +160,19 @@ func (rc rcClient) start() { func (rc rcClient) mrfUpdateCallback(updates map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)) { // If the updates map is empty, we should unset the failover settings if they were set via RC previously if len(updates) == 0 { - mrfFailoverMetricsSource := config.Datadog().GetSource("multi_region_failover.failover_metrics") - mrfFailoverLogsSource := config.Datadog().GetSource("multi_region_failover.failover_logs") + mrfFailoverMetricsSource := pkgconfigsetup.Datadog().GetSource("multi_region_failover.failover_metrics") + mrfFailoverLogsSource := pkgconfigsetup.Datadog().GetSource("multi_region_failover.failover_logs") // Unset the RC-sourced failover values regardless of what they are - config.Datadog().UnsetForSource("multi_region_failover.failover_metrics", model.SourceRC) - config.Datadog().UnsetForSource("multi_region_failover.failover_logs", model.SourceRC) + pkgconfigsetup.Datadog().UnsetForSource("multi_region_failover.failover_metrics", model.SourceRC) + pkgconfigsetup.Datadog().UnsetForSource("multi_region_failover.failover_logs", model.SourceRC) // If either of the values were previously set via RC, log the current values now that we've unset them if mrfFailoverMetricsSource == model.SourceRC { - pkglog.Infof("Falling back to `multi_region_failover.failover_metrics: %t`", config.Datadog().GetBool("multi_region_failover.failover_metrics")) + pkglog.Infof("Falling back to `multi_region_failover.failover_metrics: %t`", pkgconfigsetup.Datadog().GetBool("multi_region_failover.failover_metrics")) } if mrfFailoverLogsSource == model.SourceRC { - pkglog.Infof("Falling back to `multi_region_failover.failover_logs: %t`", config.Datadog().GetBool("multi_region_failover.failover_logs")) + pkglog.Infof("Falling back to `multi_region_failover.failover_logs: %t`", 
pkgconfigsetup.Datadog().GetBool("multi_region_failover.failover_logs")) } return } @@ -261,7 +261,7 @@ func (rc rcClient) agentConfigUpdateCallback(updates map[string]state.RawConfig, } // Checks who (the source) is responsible for the last logLevel change - source := config.Datadog().GetSource("log_level") + source := pkgconfigsetup.Datadog().GetSource("log_level") switch source { case model.SourceRC: @@ -269,8 +269,8 @@ func (rc rcClient) agentConfigUpdateCallback(updates map[string]state.RawConfig, // - we want to change (once again) the log level through RC // - we want to fall back to the log level we had saved as fallback (in that case mergedConfig.LogLevel == "") if len(mergedConfig.LogLevel) == 0 { - pkglog.Infof("Removing remote-config log level override, falling back to '%s'", config.Datadog().Get("log_level")) - config.Datadog().UnsetForSource("log_level", model.SourceRC) + pkgconfigsetup.Datadog().UnsetForSource("log_level", model.SourceRC) + pkglog.Infof("Removing remote-config log level override, falling back to '%s'", pkgconfigsetup.Datadog().Get("log_level")) } else { newLevel := mergedConfig.LogLevel pkglog.Infof("Changing log level to '%s' through remote config", newLevel) diff --git a/comp/remote-config/rcclient/rcclientimpl/rcclient_test.go b/comp/remote-config/rcclient/rcclientimpl/rcclient_test.go index b7ba8ab8ff88d..d31decbceb748 100644 --- a/comp/remote-config/rcclient/rcclientimpl/rcclient_test.go +++ b/comp/remote-config/rcclient/rcclientimpl/rcclient_test.go @@ -16,10 +16,10 @@ import ( "github.com/DataDog/datadog-agent/comp/core/settings/settingsimpl" "github.com/DataDog/datadog-agent/comp/remote-config/rcclient" "github.com/DataDog/datadog-agent/pkg/api/security" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/config/remote/client" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/util/fxutil" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" @@ -43,7 +43,7 @@ func (m *mockLogLevelRuntimeSettings) Set(_ config.Component, v interface{}, sou return m.expectedError } m.logLevel = v.(string) - pkgconfig.Datadog().Set(m.Name(), m.logLevel, source) + pkgconfigsetup.Datadog().Set(m.Name(), m.logLevel, source) return nil } @@ -122,11 +122,11 @@ func TestAgentConfigCallback(t *testing.T) { structRC := rc.(rcClient) - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) assert.NoError(t, err) structRC.client, _ = client.NewUnverifiedGRPCClient( - ipcAddress, pkgconfig.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(pkgconfig.Datadog()) }, + ipcAddress, pkgconfigsetup.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(pkgconfigsetup.Datadog()) }, client.WithAgent("test-agent", "9.99.9"), client.WithProducts(state.ProductAgentConfig), client.WithPollInterval(time.Hour), @@ -134,15 +134,15 @@ func TestAgentConfigCallback(t *testing.T) { // ----------------- // Test scenario #1: Agent Flare request by RC and the log level hadn't been changed by the user before - assert.Equal(t, model.SourceDefault, pkgconfig.Datadog().GetSource("log_level")) + assert.Equal(t, model.SourceDefault, pkgconfigsetup.Datadog().GetSource("log_level")) // Set log level to debug 
structRC.agentConfigUpdateCallback(map[string]state.RawConfig{ "datadog/2/AGENT_CONFIG/layer1/configname": layerStartFlare, "datadog/2/AGENT_CONFIG/configuration_order/configname": configOrder, }, applyEmpty) - assert.Equal(t, "debug", pkgconfig.Datadog().Get("log_level")) - assert.Equal(t, model.SourceRC, pkgconfig.Datadog().GetSource("log_level")) + assert.Equal(t, "debug", pkgconfigsetup.Datadog().Get("log_level")) + assert.Equal(t, model.SourceRC, pkgconfigsetup.Datadog().GetSource("log_level")) // Send an empty log level request, as RC would at the end of the Agent Flare request // Should fallback to the default level @@ -150,36 +150,36 @@ func TestAgentConfigCallback(t *testing.T) { "datadog/2/AGENT_CONFIG/layer1/configname": layerEndFlare, "datadog/2/AGENT_CONFIG/configuration_order/configname": configOrder, }, applyEmpty) - assert.Equal(t, "info", pkgconfig.Datadog().Get("log_level")) - assert.Equal(t, model.SourceDefault, pkgconfig.Datadog().GetSource("log_level")) + assert.Equal(t, "info", pkgconfigsetup.Datadog().Get("log_level")) + assert.Equal(t, model.SourceDefault, pkgconfigsetup.Datadog().GetSource("log_level")) // ----------------- // Test scenario #2: log level was changed by the user BEFORE Agent Flare request - pkgconfig.Datadog().Set("log_level", "info", model.SourceCLI) + pkgconfigsetup.Datadog().Set("log_level", "info", model.SourceCLI) structRC.agentConfigUpdateCallback(map[string]state.RawConfig{ "datadog/2/AGENT_CONFIG/layer1/configname": layerStartFlare, "datadog/2/AGENT_CONFIG/configuration_order/configname": configOrder, }, applyEmpty) // Log level should still be "info" because it was enforced by the user - assert.Equal(t, "info", pkgconfig.Datadog().Get("log_level")) + assert.Equal(t, "info", pkgconfigsetup.Datadog().Get("log_level")) // Source should still be CLI as it has priority over RC - assert.Equal(t, model.SourceCLI, pkgconfig.Datadog().GetSource("log_level")) + assert.Equal(t, model.SourceCLI, pkgconfigsetup.Datadog().GetSource("log_level")) // ----------------- // Test scenario #3: log level is changed by the user DURING the Agent Flare request - pkgconfig.Datadog().UnsetForSource("log_level", model.SourceCLI) + pkgconfigsetup.Datadog().UnsetForSource("log_level", model.SourceCLI) structRC.agentConfigUpdateCallback(map[string]state.RawConfig{ "datadog/2/AGENT_CONFIG/layer1/configname": layerStartFlare, "datadog/2/AGENT_CONFIG/configuration_order/configname": configOrder, }, applyEmpty) - assert.Equal(t, "debug", pkgconfig.Datadog().Get("log_level")) - assert.Equal(t, model.SourceRC, pkgconfig.Datadog().GetSource("log_level")) + assert.Equal(t, "debug", pkgconfigsetup.Datadog().Get("log_level")) + assert.Equal(t, model.SourceRC, pkgconfigsetup.Datadog().GetSource("log_level")) - pkgconfig.Datadog().Set("log_level", "debug", model.SourceCLI) + pkgconfigsetup.Datadog().Set("log_level", "debug", model.SourceCLI) structRC.agentConfigUpdateCallback(map[string]state.RawConfig{ "datadog/2/AGENT_CONFIG/layer1/configname": layerEndFlare, "datadog/2/AGENT_CONFIG/configuration_order/configname": configOrder, }, applyEmpty) - assert.Equal(t, "debug", pkgconfig.Datadog().Get("log_level")) - assert.Equal(t, model.SourceCLI, pkgconfig.Datadog().GetSource("log_level")) + assert.Equal(t, "debug", pkgconfigsetup.Datadog().Get("log_level")) + assert.Equal(t, model.SourceCLI, pkgconfigsetup.Datadog().GetSource("log_level")) } diff --git a/comp/remote-config/rcservice/rcserviceimpl/rcservice.go b/comp/remote-config/rcservice/rcserviceimpl/rcservice.go index 
d212af87f63ed..48226f0dcb916 100644 --- a/comp/remote-config/rcservice/rcserviceimpl/rcservice.go +++ b/comp/remote-config/rcservice/rcserviceimpl/rcservice.go @@ -19,8 +19,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname" "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" "github.com/DataDog/datadog-agent/comp/remote-config/rctelemetryreporter" - "github.com/DataDog/datadog-agent/pkg/config" remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/version" @@ -50,7 +50,7 @@ type dependencies struct { // newRemoteConfigServiceOptional conditionally creates and configures a new remote config service, based on whether RC is enabled. func newRemoteConfigServiceOptional(deps dependencies) optional.Option[rcservice.Component] { none := optional.NewNoneOption[rcservice.Component]() - if !config.IsRemoteConfigEnabled(deps.Cfg) { + if !pkgconfigsetup.IsRemoteConfigEnabled(deps.Cfg) { return none } diff --git a/comp/remote-config/rcservicemrf/rcservicemrfimpl/rcservicemrf.go b/comp/remote-config/rcservicemrf/rcservicemrfimpl/rcservicemrf.go index 46fe18b7e0894..a474da3cfeb89 100644 --- a/comp/remote-config/rcservicemrf/rcservicemrfimpl/rcservicemrf.go +++ b/comp/remote-config/rcservicemrf/rcservicemrfimpl/rcservicemrf.go @@ -18,8 +18,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname" "github.com/DataDog/datadog-agent/comp/remote-config/rcservicemrf" "github.com/DataDog/datadog-agent/comp/remote-config/rctelemetryreporter" - "github.com/DataDog/datadog-agent/pkg/config" remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -49,7 +49,7 @@ type dependencies struct { // newMrfRemoteConfigServiceOptional conditionally creates and configures a new MRF remote config service, based on whether RC is enabled. 
func newMrfRemoteConfigServiceOptional(deps dependencies) optional.Option[rcservicemrf.Component] { none := optional.NewNoneOption[rcservicemrf.Component]() - if !config.IsRemoteConfigEnabled(deps.Cfg) || !deps.Cfg.GetBool("multi_region_failover.enabled") { + if !pkgconfigsetup.IsRemoteConfigEnabled(deps.Cfg) || !deps.Cfg.GetBool("multi_region_failover.enabled") { return none } diff --git a/comp/serializer/compression/go.mod b/comp/serializer/compression/go.mod index efb7938bcb160..28510067b064e 100644 --- a/comp/serializer/compression/go.mod +++ b/comp/serializer/compression/go.mod @@ -15,7 +15,9 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/telemetry => ../../../pkg/telemetry github.com/DataDog/datadog-agent/pkg/util/executable => ../../../pkg/util/executable github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../pkg/util/filesystem @@ -48,7 +50,9 @@ require ( github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect @@ -77,7 +81,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect @@ -90,12 +94,12 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/serializer/compression/go.sum b/comp/serializer/compression/go.sum index f283093babdbd..84122e1cd8f8b 100644 --- a/comp/serializer/compression/go.sum +++ b/comp/serializer/compression/go.sum @@ -184,8 +184,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -257,15 +258,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -301,8 +302,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -318,8 +319,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/comp/snmptraps/config/config.go b/comp/snmptraps/config/config.go index 97afe10653c77..418f38a107dd0 100644 --- a/comp/snmptraps/config/config.go +++ b/comp/snmptraps/config/config.go @@ -14,6 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/snmptraps/snmplog" + "github.com/DataDog/datadog-agent/pkg/config/structure" "github.com/DataDog/datadog-agent/pkg/snmp/gosnmplib" "github.com/DataDog/datadog-agent/pkg/snmp/utils" ) @@ -51,7 +52,7 @@ type TrapsConfig struct { // ReadConfig builds the traps configuration from the Agent configuration. 
func ReadConfig(host string, conf config.Component) (*TrapsConfig, error) { var c = &TrapsConfig{} - err := conf.UnmarshalKey("network_devices.snmp_traps", &c) + err := structure.UnmarshalKey(conf, "network_devices.snmp_traps", c) if err != nil { return nil, err } diff --git a/comp/systray/systray/systrayimpl/doflare.go b/comp/systray/systray/systrayimpl/doflare.go index dc0c8228011f7..229f1f113f71c 100644 --- a/comp/systray/systray/systrayimpl/doflare.go +++ b/comp/systray/systray/systrayimpl/doflare.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/flare/helpers" "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -171,14 +171,14 @@ func requestFlare(s *systrayImpl, caseID, customerEmail string) (response string s.log.Debug("Asking the agent to build the flare archive.") c := util.GetClient(false) // FIX: get certificates right then make this true - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return "", err } - urlstr := fmt.Sprintf("https://%v:%v/agent/flare", ipcAddress, config.Datadog().GetInt("cmd_port")) + urlstr := fmt.Sprintf("https://%v:%v/agent/flare", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")) // Set session token - e = util.SetAuthToken(config.Datadog()) + e = util.SetAuthToken(pkgconfigsetup.Datadog()) if e != nil { return } diff --git a/comp/trace/agent/def/go.mod b/comp/trace/agent/def/go.mod index 3647b991a6acd..3519642a216f9 100644 --- a/comp/trace/agent/def/go.mod +++ b/comp/trace/agent/def/go.mod @@ -6,7 +6,7 @@ replace github.com/DataDog/datadog-agent/pkg/proto => ../../../../pkg/proto require ( github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 go.opentelemetry.io/collector/pdata v1.9.0 ) diff --git a/comp/trace/agent/def/go.sum b/comp/trace/agent/def/go.sum index 99ed13405a9d9..d7e46e4a7be46 100644 --- a/comp/trace/agent/def/go.sum +++ b/comp/trace/agent/def/go.sum @@ -1,5 +1,5 @@ -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 h1:ZI8u3CgdMXpDplrf9/gIr13+/g/tUzUcBMk2ZhXgzLE= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= diff --git a/comp/trace/agent/impl/run.go b/comp/trace/agent/impl/run.go index d88e015dc4ee7..15bc8ed8ea198 100644 --- a/comp/trace/agent/impl/run.go +++ b/comp/trace/agent/impl/run.go @@ -15,8 +15,8 @@ import ( "github.com/DataDog/datadog-agent/comp/trace/config" "github.com/DataDog/datadog-agent/pkg/api/security" apiutil "github.com/DataDog/datadog-agent/pkg/api/util" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" rc "github.com/DataDog/datadog-agent/pkg/config/remote/client" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/trace/api" tracecfg "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/info" @@ -40,13 +40,13 @@ func runAgentSidekicks(ag component) error { defer watchdog.LogOnPanic(ag.Statsd) - if err := util.SetupCoreDump(coreconfig.Datadog()); err != nil { + if err := util.SetupCoreDump(pkgconfigsetup.Datadog()); err != nil { log.Warnf("Can't setup core dumps: %v, core dumps might not be available after a crash", err) } rand.Seed(time.Now().UTC().UnixNano()) - if coreconfig.IsRemoteConfigEnabled(coreconfig.Datadog()) { + if pkgconfigsetup.IsRemoteConfigEnabled(pkgconfigsetup.Datadog()) { cf, err := newConfigFetcher() if err != nil { ag.telemetryCollector.SendStartupError(telemetry.CantCreateRCCLient, err) @@ -65,7 +65,7 @@ func runAgentSidekicks(ag component) error { // the trace agent. // pkg/config is not a go-module yet and pulls a large chunk of Agent code base with it. Using it within the // trace-agent would largely increase the number of module pulled by OTEL when using the pkg/trace go-module. - if err := apiutil.CreateAndSetAuthToken(coreconfig.Datadog()); err != nil { + if err := apiutil.CreateAndSetAuthToken(pkgconfigsetup.Datadog()); err != nil { log.Errorf("could not set auth token: %s", err) } else { ag.Agent.DebugServer.AddRoute("/config", ag.config.GetConfigHandler()) @@ -106,37 +106,37 @@ func stopAgentSidekicks(cfg config.Component, statsd statsd.ClientInterface) { } func profilingConfig(tracecfg *tracecfg.AgentConfig) *profiling.Settings { - if !coreconfig.Datadog().GetBool("apm_config.internal_profiling.enabled") { + if !pkgconfigsetup.Datadog().GetBool("apm_config.internal_profiling.enabled") { return nil } - endpoint := coreconfig.Datadog().GetString("internal_profiling.profile_dd_url") + endpoint := pkgconfigsetup.Datadog().GetString("internal_profiling.profile_dd_url") if endpoint == "" { endpoint = fmt.Sprintf(profiling.ProfilingURLTemplate, tracecfg.Site) } - tags := coreconfig.Datadog().GetStringSlice("internal_profiling.extra_tags") + tags := pkgconfigsetup.Datadog().GetStringSlice("internal_profiling.extra_tags") tags = append(tags, fmt.Sprintf("version:%s", version.AgentVersion)) return &profiling.Settings{ ProfilingURL: endpoint, // remaining configuration parameters use the top-level `internal_profiling` config - Period: coreconfig.Datadog().GetDuration("internal_profiling.period"), + Period: pkgconfigsetup.Datadog().GetDuration("internal_profiling.period"), Service: "trace-agent", - CPUDuration: coreconfig.Datadog().GetDuration("internal_profiling.cpu_duration"), - MutexProfileFraction: coreconfig.Datadog().GetInt("internal_profiling.mutex_profile_fraction"), - BlockProfileRate: coreconfig.Datadog().GetInt("internal_profiling.block_profile_rate"), - WithGoroutineProfile: coreconfig.Datadog().GetBool("internal_profiling.enable_goroutine_stacktraces"), - WithBlockProfile: coreconfig.Datadog().GetBool("internal_profiling.enable_block_profiling"), - WithMutexProfile: coreconfig.Datadog().GetBool("internal_profiling.enable_mutex_profiling"), + CPUDuration: pkgconfigsetup.Datadog().GetDuration("internal_profiling.cpu_duration"), + MutexProfileFraction: pkgconfigsetup.Datadog().GetInt("internal_profiling.mutex_profile_fraction"), + BlockProfileRate: pkgconfigsetup.Datadog().GetInt("internal_profiling.block_profile_rate"), + WithGoroutineProfile: pkgconfigsetup.Datadog().GetBool("internal_profiling.enable_goroutine_stacktraces"), + 
WithBlockProfile: pkgconfigsetup.Datadog().GetBool("internal_profiling.enable_block_profiling"), + WithMutexProfile: pkgconfigsetup.Datadog().GetBool("internal_profiling.enable_mutex_profiling"), Tags: tags, } } func newConfigFetcher() (rc.ConfigFetcher, error) { - ipcAddress, err := coreconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } // Auth tokens are handled by the rcClient - return rc.NewAgentGRPCConfigFetcher(ipcAddress, coreconfig.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(coreconfig.Datadog()) }) + return rc.NewAgentGRPCConfigFetcher(ipcAddress, pkgconfigsetup.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(pkgconfigsetup.Datadog()) }) } diff --git a/comp/trace/config/component.go b/comp/trace/config/component.go index 3e34c58739461..6a5b4427d6d30 100644 --- a/comp/trace/config/component.go +++ b/comp/trace/config/component.go @@ -20,7 +20,7 @@ import ( "go.uber.org/fx" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" traceconfig "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -30,7 +30,7 @@ import ( // Component is the component type. type Component interface { // Warnings returns config warnings collected during setup. - Warnings() *config.Warnings + Warnings() *model.Warnings // SetHandler returns a handler for runtime configuration changes. SetHandler() http.Handler diff --git a/comp/trace/config/config.go b/comp/trace/config/config.go index f00889c39a708..3aca85dc07b32 100644 --- a/comp/trace/config/config.go +++ b/comp/trace/config/config.go @@ -17,9 +17,9 @@ import ( coreconfig "github.com/DataDog/datadog-agent/comp/core/config" apiutil "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pkgconfigutils "github.com/DataDog/datadog-agent/pkg/config/utils" traceconfig "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -46,7 +46,7 @@ type cfg struct { coreConfig coreconfig.Component // warnings are the warnings generated during setup - warnings *pkgconfig.Warnings + warnings *model.Warnings } // NewConfig is the default constructor for the component, it returns @@ -70,7 +70,7 @@ func NewConfig(deps Dependencies) (Component, error) { return &c, nil } -func (c *cfg) Warnings() *pkgconfig.Warnings { +func (c *cfg) Warnings() *model.Warnings { return c.warnings } @@ -96,7 +96,7 @@ func (c *cfg) SetHandler() http.Handler { if lvl == "warning" { lvl = "warn" } - if err := pkgconfigutils.SetLogLevel(lvl, pkgconfig.Datadog(), model.SourceAgentRuntime); err != nil { + if err := pkgconfigutils.SetLogLevel(lvl, pkgconfigsetup.Datadog(), model.SourceAgentRuntime); err != nil { httpError(w, http.StatusInternalServerError, err) return } diff --git a/comp/trace/config/config_mock.go b/comp/trace/config/config_mock.go index 38d2f01fce4ba..d0ef2dfbc848a 100644 --- a/comp/trace/config/config_mock.go +++ b/comp/trace/config/config_mock.go @@ -11,8 +11,8 @@ package config import ( "testing" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + "github.com/DataDog/datadog-agent/pkg/config/model" ) // newMock exported mock builder 
to allow modifying mocks that might be @@ -24,7 +24,7 @@ func newMock(deps Dependencies, _ testing.TB) (Component, error) { } c := cfg{ - warnings: &pkgconfig.Warnings{}, + warnings: &model.Warnings{}, coreConfig: deps.Config, AgentConfig: traceCfg, } diff --git a/comp/trace/config/config_test.go b/comp/trace/config/config_test.go index 093b90295e106..fcf3e598a8fd1 100644 --- a/comp/trace/config/config_test.go +++ b/comp/trace/config/config_test.go @@ -31,8 +31,8 @@ import ( corecomp "github.com/DataDog/datadog-agent/comp/core/config" apiutil "github.com/DataDog/datadog-agent/pkg/api/util" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" traceconfig "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -882,9 +882,9 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) if tt.envNew == "DD_APM_IGNORE_RESOURCES" { - assert.Equal(t, []string{"4", "5", "6"}, coreconfig.Datadog().GetStringSlice(tt.key)) + assert.Equal(t, []string{"4", "5", "6"}, pkgconfigsetup.Datadog().GetStringSlice(tt.key)) } else { - assert.Equal(t, "4,5,6", coreconfig.Datadog().GetString(tt.key)) + assert.Equal(t, "4,5,6", pkgconfigsetup.Datadog().GetString(tt.key)) } } }) @@ -1504,7 +1504,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) - assert.Equal(t, "my-site.com", coreconfig.Datadog().GetString("apm_config.profiling_dd_url")) + assert.Equal(t, "my-site.com", pkgconfigsetup.Datadog().GetString("apm_config.profiling_dd_url")) }) env = "DD_APM_DEBUGGER_DD_URL" @@ -1522,7 +1522,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) - assert.Equal(t, "my-site.com", coreconfig.Datadog().GetString("apm_config.debugger_dd_url")) + assert.Equal(t, "my-site.com", pkgconfigsetup.Datadog().GetString("apm_config.debugger_dd_url")) }) env = "DD_APM_DEBUGGER_API_KEY" @@ -1540,7 +1540,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) - assert.Equal(t, "my-key", coreconfig.Datadog().GetString("apm_config.debugger_api_key")) + assert.Equal(t, "my-key", pkgconfigsetup.Datadog().GetString("apm_config.debugger_api_key")) }) env = "DD_APM_DEBUGGER_ADDITIONAL_ENDPOINTS" @@ -1562,7 +1562,7 @@ func TestLoadEnv(t *testing.T) { "url2": {"key3"}, } - actual := coreconfig.Datadog().GetStringMapStringSlice("apm_config.debugger_additional_endpoints") + actual := pkgconfigsetup.Datadog().GetStringMapStringSlice("apm_config.debugger_additional_endpoints") if !reflect.DeepEqual(actual, expected) { t.Fatalf("Failed to process env var %s, expected %v and got %v", env, expected, actual) } @@ -1583,7 +1583,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) - assert.Equal(t, "my-diagnostics-site.com", coreconfig.Datadog().GetString("apm_config.debugger_diagnostics_dd_url")) + assert.Equal(t, "my-diagnostics-site.com", pkgconfigsetup.Datadog().GetString("apm_config.debugger_diagnostics_dd_url")) }) env = "DD_APM_DEBUGGER_DIAGNOSTICS_API_KEY" @@ -1601,7 +1601,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) - assert.Equal(t, "my-diagnostics-key", coreconfig.Datadog().GetString("apm_config.debugger_diagnostics_api_key")) + assert.Equal(t, "my-diagnostics-key", pkgconfigsetup.Datadog().GetString("apm_config.debugger_diagnostics_api_key")) }) env = "DD_APM_DEBUGGER_DIAGNOSTICS_ADDITIONAL_ENDPOINTS" @@ -1623,7 +1623,7 @@ func TestLoadEnv(t *testing.T) { "diagnostics-url2": {"diagnostics-key3"}, } - actual := 
coreconfig.Datadog().GetStringMapStringSlice("apm_config.debugger_diagnostics_additional_endpoints") + actual := pkgconfigsetup.Datadog().GetStringMapStringSlice("apm_config.debugger_diagnostics_additional_endpoints") if !reflect.DeepEqual(actual, expected) { t.Fatalf("Failed to process env var %s, expected %v and got %v", env, expected, actual) } @@ -1643,7 +1643,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.Equal(t, "my-site.com", coreconfig.Datadog().GetString("apm_config.symdb_dd_url")) + assert.Equal(t, "my-site.com", pkgconfigsetup.Datadog().GetString("apm_config.symdb_dd_url")) }) env = "DD_APM_SYMDB_API_KEY" @@ -1660,7 +1660,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.Equal(t, "my-key", coreconfig.Datadog().GetString("apm_config.symdb_api_key")) + assert.Equal(t, "my-key", pkgconfigsetup.Datadog().GetString("apm_config.symdb_api_key")) }) env = "DD_APM_SYMDB_ADDITIONAL_ENDPOINTS" @@ -1682,7 +1682,7 @@ func TestLoadEnv(t *testing.T) { "url2": {"key3"}, } - actual := coreconfig.Datadog().GetStringMapStringSlice("apm_config.symdb_additional_endpoints") + actual := pkgconfigsetup.Datadog().GetStringMapStringSlice("apm_config.symdb_additional_endpoints") if !reflect.DeepEqual(actual, expected) { t.Fatalf("Failed to process env var %s, expected %v and got %v", env, expected, actual) } @@ -1703,7 +1703,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) - assert.False(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.credit_cards.enabled")) + assert.False(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.credit_cards.enabled")) assert.False(t, cfg.Obfuscation.CreditCards.Enabled) }) @@ -1721,7 +1721,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.False(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.credit_cards.luhn")) + assert.False(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.credit_cards.luhn")) }) env = "DD_APM_OBFUSCATION_ELASTICSEARCH_ENABLED" @@ -1738,7 +1738,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.elasticsearch.enabled")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.elasticsearch.enabled")) assert.True(t, cfg.Obfuscation.ES.Enabled) }) @@ -1757,7 +1757,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) expected := []string{"client_id", "product_id"} - actualConfig := coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.keep_values") + actualConfig := pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.keep_values") actualParsed := cfg.Obfuscation.ES.KeepValues assert.Equal(t, expected, actualConfig) assert.Equal(t, expected, actualParsed) @@ -1778,7 +1778,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) expected := []string{"key1", "key2"} - actualConfig := coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.obfuscate_sql_values") + actualConfig := pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.obfuscate_sql_values") actualParsed := cfg.Obfuscation.ES.ObfuscateSQLValues assert.Equal(t, expected, actualConfig) assert.Equal(t, expected, actualParsed) @@ -1798,7 +1798,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.http.remove_query_string")) + assert.True(t, 
pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.http.remove_query_string")) assert.True(t, cfg.Obfuscation.HTTP.RemoveQueryString) }) @@ -1816,7 +1816,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.memcached.enabled")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.memcached.enabled")) assert.True(t, cfg.Obfuscation.Memcached.Enabled) }) @@ -1834,7 +1834,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.memcached.enabled")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.memcached.enabled")) assert.True(t, cfg.Obfuscation.Memcached.Enabled) }) @@ -1852,9 +1852,9 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.memcached.enabled")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.memcached.enabled")) assert.True(t, cfg.Obfuscation.Memcached.Enabled) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.memcached.keep_command")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.memcached.keep_command")) assert.True(t, cfg.Obfuscation.Memcached.KeepCommand) }) @@ -1872,7 +1872,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.mongodb.enabled")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.mongodb.enabled")) assert.True(t, cfg.Obfuscation.Mongo.Enabled) }) @@ -1891,7 +1891,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) expected := []string{"document_id", "template_id"} - actualConfig := coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.keep_values") + actualConfig := pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.keep_values") actualParsed := cfg.Obfuscation.Mongo.KeepValues assert.Equal(t, expected, actualConfig) assert.Equal(t, expected, actualParsed) @@ -1912,7 +1912,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) expected := []string{"key1", "key2"} - actualConfig := coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.obfuscate_sql_values") + actualConfig := pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.obfuscate_sql_values") actualParsed := cfg.Obfuscation.Mongo.ObfuscateSQLValues assert.Equal(t, expected, actualConfig) assert.Equal(t, expected, actualParsed) @@ -1932,7 +1932,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.redis.enabled")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.redis.enabled")) assert.True(t, cfg.Obfuscation.Redis.Enabled) }) @@ -1950,7 +1950,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.redis.remove_all_args")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.redis.remove_all_args")) assert.True(t, cfg.Obfuscation.Redis.RemoveAllArgs) }) @@ -1968,7 +1968,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.remove_stack_traces")) + assert.True(t, 
pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.remove_stack_traces")) assert.True(t, cfg.Obfuscation.RemoveStackTraces) }) @@ -1986,7 +1986,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan.enabled")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan.enabled")) assert.True(t, cfg.Obfuscation.SQLExecPlan.Enabled) }) @@ -2005,7 +2005,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) expected := []string{"id1", "id2"} - actualConfig := coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.keep_values") + actualConfig := pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.keep_values") actualParsed := cfg.Obfuscation.SQLExecPlan.KeepValues assert.Equal(t, expected, actualConfig) assert.Equal(t, expected, actualParsed) @@ -2026,7 +2026,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) expected := []string{"key1", "key2"} - actualConfig := coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values") + actualConfig := pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values") actualParsed := cfg.Obfuscation.SQLExecPlan.ObfuscateSQLValues assert.Equal(t, expected, actualConfig) assert.Equal(t, expected, actualParsed) @@ -2046,7 +2046,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan_normalize.enabled")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan_normalize.enabled")) assert.True(t, cfg.Obfuscation.SQLExecPlanNormalize.Enabled) }) @@ -2065,7 +2065,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) expected := []string{"id1", "id2"} - actualConfig := coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.keep_values") + actualConfig := pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.keep_values") actualParsed := cfg.Obfuscation.SQLExecPlanNormalize.KeepValues assert.Equal(t, expected, actualConfig) assert.Equal(t, expected, actualParsed) @@ -2086,7 +2086,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) expected := []string{"key1", "key2"} - actualConfig := coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values") + actualConfig := pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values") actualParsed := cfg.Obfuscation.SQLExecPlanNormalize.ObfuscateSQLValues assert.Equal(t, expected, actualConfig) assert.Equal(t, expected, actualParsed) @@ -2111,7 +2111,7 @@ func TestLoadEnv(t *testing.T) { "url1": {"key1", "key2"}, "url2": {"key3"}, } - actual := coreconfig.Datadog().GetStringMapStringSlice("apm_config.profiling_additional_endpoints") + actual := pkgconfigsetup.Datadog().GetStringMapStringSlice("apm_config.profiling_additional_endpoints") if !reflect.DeepEqual(actual, expected) { t.Fatalf("Failed to process env var %s, expected %v and got %v", env, expected, actual) } @@ -2160,7 +2160,7 @@ func TestLoadEnv(t *testing.T) { )) cfg := c.Object() assert.NotNil(t, cfg) - assert.Equal(t, "install_id_foo_bar", coreconfig.Datadog().GetString("apm_config.install_id")) + assert.Equal(t, "install_id_foo_bar", pkgconfigsetup.Datadog().GetString("apm_config.install_id")) 
assert.Equal(t, "install_id_foo_bar", cfg.InstallSignature.InstallID) assert.True(t, cfg.InstallSignature.Found) }) @@ -2178,7 +2178,7 @@ func TestLoadEnv(t *testing.T) { )) cfg := c.Object() assert.NotNil(t, cfg) - assert.Equal(t, "host_injection", coreconfig.Datadog().GetString("apm_config.install_type")) + assert.Equal(t, "host_injection", pkgconfigsetup.Datadog().GetString("apm_config.install_type")) assert.Equal(t, "host_injection", cfg.InstallSignature.InstallType) assert.True(t, cfg.InstallSignature.Found) }) @@ -2196,7 +2196,7 @@ func TestLoadEnv(t *testing.T) { )) cfg := c.Object() assert.NotNil(t, cfg) - assert.Equal(t, int64(1699621675), coreconfig.Datadog().GetInt64("apm_config.install_time")) + assert.Equal(t, int64(1699621675), pkgconfigsetup.Datadog().GetInt64("apm_config.install_time")) assert.Equal(t, int64(1699621675), cfg.InstallSignature.InstallTime) assert.True(t, cfg.InstallSignature.Found) }) @@ -2476,9 +2476,9 @@ func TestGenerateInstallSignature(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.False(t, coreconfig.Datadog().IsSet("apm_config.install_id")) - assert.False(t, coreconfig.Datadog().IsSet("apm_config.install_type")) - assert.False(t, coreconfig.Datadog().IsSet("apm_config.install_time")) + assert.False(t, pkgconfigsetup.Datadog().IsSet("apm_config.install_id")) + assert.False(t, pkgconfigsetup.Datadog().IsSet("apm_config.install_type")) + assert.False(t, pkgconfigsetup.Datadog().IsSet("apm_config.install_time")) assert.True(t, cfg.InstallSignature.Found) installFilePath := filepath.Join(cfgDir, "install.json") diff --git a/comp/trace/config/hostname.go b/comp/trace/config/hostname.go index 294bee77a5b2a..03264e9c005ec 100644 --- a/comp/trace/config/hostname.go +++ b/comp/trace/config/hostname.go @@ -17,7 +17,7 @@ import ( "strings" "time" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/util/grpc" @@ -45,12 +45,12 @@ func acquireHostname(c *config.AgentConfig) error { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - ipcAddress, err := coreconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } - client, err := grpc.GetDDAgentClient(ctx, ipcAddress, coreconfig.GetIPCPort()) + client, err := grpc.GetDDAgentClient(ctx, ipcAddress, pkgconfigsetup.GetIPCPort()) if err != nil { return err } diff --git a/comp/trace/config/remote.go b/comp/trace/config/remote.go index 5be3e504a7579..28d01e46ae6aa 100644 --- a/comp/trace/config/remote.go +++ b/comp/trace/config/remote.go @@ -10,8 +10,8 @@ package config import ( corecompcfg "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/pkg/api/security" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" rc "github.com/DataDog/datadog-agent/pkg/config/remote/client" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/version" @@ -20,7 +20,7 @@ import ( func remote(c corecompcfg.Component, ipcAddress string) (config.RemoteClient, error) { return rc.NewGRPCClient( ipcAddress, - coreconfig.GetIPCPort(), + pkgconfigsetup.GetIPCPort(), func() (string, error) { return 
security.FetchAuthToken(c) }, rc.WithAgent(rcClientName, version.AgentVersion), rc.WithProducts(state.ProductAPMSampling, state.ProductAgentConfig), diff --git a/comp/trace/config/setup.go b/comp/trace/config/setup.go index 86f192e10fdfb..df0e554d76262 100644 --- a/comp/trace/config/setup.go +++ b/comp/trace/config/setup.go @@ -25,9 +25,10 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/otelcol/otlp" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/config/structure" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" @@ -98,17 +99,17 @@ func prepareConfig(c corecompcfg.Component) (*config.AgentConfig, error) { cfg.LogFilePath = DefaultLogFilePath } - ipcAddress, err := coreconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } orch := fargate.GetOrchestrator() // Needs to be after loading config, because it relies on feature auto-detection cfg.FargateOrchestrator = config.FargateOrchestratorName(orch) - if p := coreconfig.Datadog().GetProxies(); p != nil { + if p := pkgconfigsetup.Datadog().GetProxies(); p != nil { cfg.Proxy = httputils.GetProxyTransportFunc(p, c) } - if coreconfig.IsRemoteConfigEnabled(coreConfigObject) && coreConfigObject.GetBool("remote_configuration.apm_sampling.enabled") { + if pkgconfigsetup.IsRemoteConfigEnabled(coreConfigObject) && coreConfigObject.GetBool("remote_configuration.apm_sampling.enabled") { client, err := remote(c, ipcAddress) if err != nil { log.Errorf("Error when subscribing to remote config management %v", err) @@ -129,10 +130,10 @@ func containerTagsFunc(cid string) ([]string, error) { // The format for cfgKey should be a map which has the URL as a key and one or // more API keys as an array value. 
func appendEndpoints(endpoints []*config.Endpoint, cfgKey string) []*config.Endpoint { - if !coreconfig.Datadog().IsSet(cfgKey) { + if !pkgconfigsetup.Datadog().IsSet(cfgKey) { return endpoints } - for url, keys := range coreconfig.Datadog().GetStringMapStringSlice(cfgKey) { + for url, keys := range pkgconfigsetup.Datadog().GetStringMapStringSlice(cfgKey) { if len(keys) == 0 { log.Errorf("'%s' entries must have at least one API key present", cfgKey) continue @@ -149,7 +150,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error c.Endpoints = []*config.Endpoint{{}} } if core.IsSet("api_key") { - c.Endpoints[0].APIKey = utils.SanitizeAPIKey(coreconfig.Datadog().GetString("api_key")) + c.Endpoints[0].APIKey = utils.SanitizeAPIKey(pkgconfigsetup.Datadog().GetString("api_key")) } if core.IsSet("hostname") { c.Hostname = core.GetString("hostname") @@ -166,7 +167,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error c.Endpoints[0].Host = host } } else { - c.Endpoints[0].Host = utils.GetMainEndpoint(coreconfig.Datadog(), apiEndpointPrefix, "apm_config.apm_dd_url") + c.Endpoints[0].Host = utils.GetMainEndpoint(pkgconfigsetup.Datadog(), apiEndpointPrefix, "apm_config.apm_dd_url") } c.Endpoints = appendEndpoints(c.Endpoints, "apm_config.additional_endpoints") @@ -197,11 +198,11 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error if core.IsSet("apm_config.enabled") { c.Enabled = core.GetBool("apm_config.enabled") } - if coreconfig.Datadog().IsSet("apm_config.log_file") { - c.LogFilePath = coreconfig.Datadog().GetString("apm_config.log_file") + if pkgconfigsetup.Datadog().IsSet("apm_config.log_file") { + c.LogFilePath = pkgconfigsetup.Datadog().GetString("apm_config.log_file") } - if env := utils.GetTraceAgentDefaultEnv(coreconfig.Datadog()); env != "" { + if env := utils.GetTraceAgentDefaultEnv(pkgconfigsetup.Datadog()); env != "" { c.DefaultEnv = env } @@ -312,7 +313,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error if k := "apm_config.replace_tags"; core.IsSet(k) { rt := make([]*config.ReplaceRule, 0) - if err := coreconfig.Datadog().UnmarshalKey(k, &rt); err != nil { + if err := structure.UnmarshalKey(core, k, &rt); err != nil { log.Errorf("Bad format for %q it should be of the form '[{\"name\": \"tag_name\",\"pattern\":\"pattern\",\"repl\":\"replace_str\"}]', error: %v", "apm_config.replace_tags", err) } else { err := compileReplaceRules(rt) @@ -346,8 +347,8 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error c.GUIPort = core.GetString("GUI_port") var grpcPort int - if otlp.IsEnabled(coreconfig.Datadog()) { - grpcPort = core.GetInt(coreconfig.OTLPTracePort) + if otlp.IsEnabled(pkgconfigsetup.Datadog()) { + grpcPort = core.GetInt(pkgconfigsetup.OTLPTracePort) } // We use a noop set of telemetry settings. This silences all warnings and metrics from the attributes translator. 
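The map shape that `appendEndpoints` reads from keys such as `apm_config.additional_endpoints` is a map from intake URL to a list of API keys. A minimal sketch of what that looks like in `datadog.yaml` (the URL and key values below are placeholders for illustration, not values taken from this patch):

```yaml
apm_config:
  additional_endpoints:
    "https://trace.agent.datadoghq.eu":   # hypothetical extra intake endpoint
      - additional_api_key_1
      - additional_api_key_2
```

Entries whose key list is empty are skipped with an error, as the loop above logs.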
@@ -361,7 +362,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error BindHost: c.ReceiverHost, GRPCPort: grpcPort, MaxRequestBytes: c.MaxRequestBytes, - SpanNameRemappings: coreconfig.Datadog().GetStringMapString("otlp_config.traces.span_name_remappings"), + SpanNameRemappings: pkgconfigsetup.Datadog().GetStringMapString("otlp_config.traces.span_name_remappings"), SpanNameAsResourceName: core.GetBool("otlp_config.traces.span_name_as_resource_name"), ProbabilisticSampling: core.GetFloat64("otlp_config.traces.probabilistic_sampler.sampling_percentage"), AttributesTranslator: attributesTranslator, @@ -384,7 +385,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error if core.GetBool("apm_config.telemetry.enabled") { c.TelemetryConfig.Enabled = true c.TelemetryConfig.Endpoints = []*config.Endpoint{{ - Host: utils.GetMainEndpoint(coreconfig.Datadog(), config.TelemetryEndpointPrefix, "apm_config.telemetry.dd_url"), + Host: utils.GetMainEndpoint(pkgconfigsetup.Datadog(), config.TelemetryEndpointPrefix, "apm_config.telemetry.dd_url"), APIKey: c.Endpoints[0].APIKey, }} c.TelemetryConfig.Endpoints = appendEndpoints(c.TelemetryConfig.Endpoints, "apm_config.telemetry.additional_endpoints") @@ -392,7 +393,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error c.Obfuscation = new(config.ObfuscationConfig) if core.IsSet("apm_config.obfuscation") { var o config.ObfuscationConfig - err := coreconfig.Datadog().UnmarshalKey("apm_config.obfuscation", &o) + err := pkgconfigsetup.Datadog().UnmarshalKey("apm_config.obfuscation", &o) if err == nil { c.Obfuscation = &o if o.RemoveStackTraces { @@ -413,7 +414,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error c.Obfuscation.Redis.Enabled = true c.Obfuscation.CreditCards.Enabled = true - // TODO(x): There is an issue with coreconfig.Datadog().IsSet("apm_config.obfuscation"), probably coming from Viper, + // TODO(x): There is an issue with pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation"), probably coming from Viper, // where it returns false even is "apm_config.obfuscation.credit_cards.enabled" is set via an environment // variable, so we need a temporary workaround by specifically setting env. var. accessible fields. 
if core.IsSet("apm_config.obfuscation.credit_cards.enabled") { @@ -422,71 +423,71 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error if core.IsSet("apm_config.obfuscation.credit_cards.luhn") { c.Obfuscation.CreditCards.Luhn = core.GetBool("apm_config.obfuscation.credit_cards.luhn") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.elasticsearch.enabled") { - c.Obfuscation.ES.Enabled = coreconfig.Datadog().GetBool("apm_config.obfuscation.elasticsearch.enabled") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.elasticsearch.enabled") { + c.Obfuscation.ES.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.elasticsearch.enabled") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.elasticsearch.keep_values") { - c.Obfuscation.ES.KeepValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.keep_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.elasticsearch.keep_values") { + c.Obfuscation.ES.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.keep_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.elasticsearch.obfuscate_sql_values") { - c.Obfuscation.ES.ObfuscateSQLValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.obfuscate_sql_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.elasticsearch.obfuscate_sql_values") { + c.Obfuscation.ES.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.obfuscate_sql_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.opensearch.enabled") { - c.Obfuscation.OpenSearch.Enabled = coreconfig.Datadog().GetBool("apm_config.obfuscation.opensearch.enabled") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.opensearch.enabled") { + c.Obfuscation.OpenSearch.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.opensearch.enabled") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.opensearch.keep_values") { - c.Obfuscation.OpenSearch.KeepValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.opensearch.keep_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.opensearch.keep_values") { + c.Obfuscation.OpenSearch.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.opensearch.keep_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.opensearch.obfuscate_sql_values") { - c.Obfuscation.OpenSearch.ObfuscateSQLValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.opensearch.obfuscate_sql_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.opensearch.obfuscate_sql_values") { + c.Obfuscation.OpenSearch.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.opensearch.obfuscate_sql_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.http.remove_query_string") { - c.Obfuscation.HTTP.RemoveQueryString = coreconfig.Datadog().GetBool("apm_config.obfuscation.http.remove_query_string") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.http.remove_query_string") { + c.Obfuscation.HTTP.RemoveQueryString = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.http.remove_query_string") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.http.remove_paths_with_digits") { - c.Obfuscation.HTTP.RemovePathDigits = coreconfig.Datadog().GetBool("apm_config.obfuscation.http.remove_paths_with_digits") + if 
pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.http.remove_paths_with_digits") { + c.Obfuscation.HTTP.RemovePathDigits = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.http.remove_paths_with_digits") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.memcached.enabled") { - c.Obfuscation.Memcached.Enabled = coreconfig.Datadog().GetBool("apm_config.obfuscation.memcached.enabled") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.memcached.enabled") { + c.Obfuscation.Memcached.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.memcached.enabled") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.memcached.keep_command") { - c.Obfuscation.Memcached.KeepCommand = coreconfig.Datadog().GetBool("apm_config.obfuscation.memcached.keep_command") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.memcached.keep_command") { + c.Obfuscation.Memcached.KeepCommand = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.memcached.keep_command") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.mongodb.enabled") { - c.Obfuscation.Mongo.Enabled = coreconfig.Datadog().GetBool("apm_config.obfuscation.mongodb.enabled") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.mongodb.enabled") { + c.Obfuscation.Mongo.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.mongodb.enabled") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.mongodb.keep_values") { - c.Obfuscation.Mongo.KeepValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.keep_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.mongodb.keep_values") { + c.Obfuscation.Mongo.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.keep_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.mongodb.obfuscate_sql_values") { - c.Obfuscation.Mongo.ObfuscateSQLValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.obfuscate_sql_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.mongodb.obfuscate_sql_values") { + c.Obfuscation.Mongo.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.obfuscate_sql_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.redis.enabled") { - c.Obfuscation.Redis.Enabled = coreconfig.Datadog().GetBool("apm_config.obfuscation.redis.enabled") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.redis.enabled") { + c.Obfuscation.Redis.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.redis.enabled") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.redis.remove_all_args") { - c.Obfuscation.Redis.RemoveAllArgs = coreconfig.Datadog().GetBool("apm_config.obfuscation.redis.remove_all_args") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.redis.remove_all_args") { + c.Obfuscation.Redis.RemoveAllArgs = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.redis.remove_all_args") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.remove_stack_traces") { - c.Obfuscation.RemoveStackTraces = coreconfig.Datadog().GetBool("apm_config.obfuscation.remove_stack_traces") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.remove_stack_traces") { + c.Obfuscation.RemoveStackTraces = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.remove_stack_traces") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan.enabled") { - c.Obfuscation.SQLExecPlan.Enabled = 
coreconfig.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan.enabled") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan.enabled") { + c.Obfuscation.SQLExecPlan.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan.enabled") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan.keep_values") { - c.Obfuscation.SQLExecPlan.KeepValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.keep_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan.keep_values") { + c.Obfuscation.SQLExecPlan.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.keep_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values") { - c.Obfuscation.SQLExecPlan.ObfuscateSQLValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values") { + c.Obfuscation.SQLExecPlan.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan_normalize.enabled") { - c.Obfuscation.SQLExecPlanNormalize.Enabled = coreconfig.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan_normalize.enabled") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan_normalize.enabled") { + c.Obfuscation.SQLExecPlanNormalize.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan_normalize.enabled") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan_normalize.keep_values") { - c.Obfuscation.SQLExecPlanNormalize.KeepValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.keep_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan_normalize.keep_values") { + c.Obfuscation.SQLExecPlanNormalize.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.keep_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values") { - c.Obfuscation.SQLExecPlanNormalize.ObfuscateSQLValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values") { + c.Obfuscation.SQLExecPlanNormalize.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values") } } @@ -503,8 +504,8 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error } } - if coreconfig.Datadog().IsSet("apm_config.filter_tags_regex.require") { - tags := coreconfig.Datadog().GetStringSlice("apm_config.filter_tags_regex.require") + if pkgconfigsetup.Datadog().IsSet("apm_config.filter_tags_regex.require") { + tags := pkgconfigsetup.Datadog().GetStringSlice("apm_config.filter_tags_regex.require") for _, tag := range tags { splitTag := splitTagRegex(tag) if containsKey(c.RequireTags, splitTag.K) { @@ -514,8 +515,8 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error c.RequireTagsRegex = append(c.RequireTagsRegex, splitTag) } } - if coreconfig.Datadog().IsSet("apm_config.filter_tags_regex.reject") { - tags := 
coreconfig.Datadog().GetStringSlice("apm_config.filter_tags_regex.reject") + if pkgconfigsetup.Datadog().IsSet("apm_config.filter_tags_regex.reject") { + tags := pkgconfigsetup.Datadog().GetStringSlice("apm_config.filter_tags_regex.reject") for _, tag := range tags { splitTag := splitTagRegex(tag) if containsKey(c.RejectTags, splitTag.K) { @@ -531,7 +532,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error "apm_config.trace_writer": c.TraceWriter, "apm_config.stats_writer": c.StatsWriter, } { - if err := coreconfig.Datadog().UnmarshalKey(key, cfg); err != nil { + if err := pkgconfigsetup.Datadog().UnmarshalKey(key, cfg); err != nil { log.Errorf("Error reading writer config %q: %v", key, err) } } @@ -551,7 +552,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error // undocumented deprecated if core.IsSet("apm_config.analyzed_rate_by_service") { rateByService := make(map[string]float64) - if err := coreconfig.Datadog().UnmarshalKey("apm_config.analyzed_rate_by_service", &rateByService); err != nil { + if err := pkgconfigsetup.Datadog().UnmarshalKey("apm_config.analyzed_rate_by_service", &rateByService); err != nil { return err } c.AnalyzedRateByServiceLegacy = rateByService @@ -588,7 +589,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error } c.Site = core.GetString("site") if c.Site == "" { - c.Site = coreconfig.DefaultSite + c.Site = pkgconfigsetup.DefaultSite } if k := "use_dogstatsd"; core.IsSet(k) { c.StatsdEnabled = core.GetBool(k) @@ -661,7 +662,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error // backwards compatibility with Agent 5. These should eventually be removed. // TODO(x): remove them gradually or fully in a future release. func loadDeprecatedValues(c *config.AgentConfig) error { - cfg := coreconfig.Datadog() + cfg := pkgconfigsetup.Datadog() if cfg.IsSet("apm_config.api_key") { c.Endpoints[0].APIKey = utils.SanitizeAPIKey(cfg.GetString("apm_config.api_key")) } @@ -842,7 +843,7 @@ func SetHandler() http.Handler { if lvl == "warning" { lvl = "warn" } - if err := utils.SetLogLevel(lvl, coreconfig.Datadog(), model.SourceAgentRuntime); err != nil { + if err := utils.SetLogLevel(lvl, pkgconfigsetup.Datadog(), model.SourceAgentRuntime); err != nil { httpError(w, http.StatusInternalServerError, err) return } diff --git a/devenv/scripts/Install-DevEnv.ps1 b/devenv/scripts/Install-DevEnv.ps1 index d35e0df5b7a65..c9fff932ce2a7 100644 --- a/devenv/scripts/Install-DevEnv.ps1 +++ b/devenv/scripts/Install-DevEnv.ps1 @@ -45,7 +45,7 @@ Write-Host -ForegroundColor Yellow -BackgroundColor DarkGreen '- Installing Gola $ErrorActionPreference = 'Stop' $ProgressPreference = 'SilentlyContinue' -$go_version = "1.22.6" +$go_version = "1.22.7" Write-Host -ForegroundColor Green "Installing go $go_version" $gozip = "https://dl.google.com/go/go$go_version.windows-amd64.zip" diff --git a/docs/dev/agent_dev_env.md b/docs/dev/agent_dev_env.md index a4b41fc748379..bb1475f6d0171 100644 --- a/docs/dev/agent_dev_env.md +++ b/docs/dev/agent_dev_env.md @@ -12,26 +12,26 @@ development files to be available in the dev env. The Agent can embed Python2 and/or Python3, you will need development files for all versions you want to support. 
-If you're on OSX/macOS, installing Python 2.7 and/or 3.11 with [Homebrew](https://brew.sh) +If you're on OSX/macOS, installing Python 2.7 and/or 3.12 with [Homebrew](https://brew.sh) brings along all the development files needed: **Please note that not using Python versions explicitly supported, you may have problems running the built Agent's Python checks, especially if using a virtualenv. -At this time, only Python 3.11 is confirmed to work as expected in the development +At this time, only Python 3.12 is confirmed to work as expected in the development environment.** ``` brew install python@2 -brew install python@3.11 +brew install python@3.12 ``` On Linux, depending on the distribution, you might need to explicitly install the development files, for example on Ubuntu: ``` sudo apt-get install python2.7-dev -sudo apt-get install python3.11-dev +sudo apt-get install python3.12-dev ``` -On Windows, install Python 2.7 and/or 3.11 via the [official installer](https://www.python.org/downloads/). +On Windows, install Python 2.7 and/or 3.12 via the [official installer](https://www.python.org/downloads/). #### Python Dependencies @@ -118,7 +118,7 @@ sure to work is `virtualenv`.** If using virtual environments when running the built Agent, you may need to override the built Agent's search path for Python check packages using the `PYTHONPATH` variable (your target path must have the [pre-requisite core integration packages installed](https://datadoghq.dev/integrations-core/setup/) though). ```sh -PYTHONPATH="./venv/lib/python3.11/site-packages:$PYTHONPATH" ./agent run ... +PYTHONPATH="./venv/lib/python3.12/site-packages:$PYTHONPATH" ./agent run ... ``` See also some notes in [./checks](https://github.com/DataDog/datadog-agent/tree/main/docs/dev/checks) about running custom python checks. @@ -138,7 +138,7 @@ This procedure ensures you not only get the correct version of `invoke`, but als ### Golang -You must [install Golang](https://golang.org/doc/install) version `1.22.6` or +You must [install Golang](https://golang.org/doc/install) version `1.22.7` or higher. Make sure that `$GOPATH/bin` is in your `$PATH` otherwise `invoke` cannot use any additional tool it might need. diff --git a/docs/public/setup.md b/docs/public/setup.md index 5f3b8d52f362c..dff2dea51a1c0 100644 --- a/docs/public/setup.md +++ b/docs/public/setup.md @@ -101,7 +101,7 @@ This procedure ensures you not only get the correct version of `invoke`, but als ### Golang -You must [install Golang](https://golang.org/doc/install) version `1.22.6` or higher. Make sure that `$GOPATH/bin` is in your `$PATH` otherwise `invoke` cannot use any additional tool it might need. +You must [install Golang](https://golang.org/doc/install) version `1.22.7` or higher. Make sure that `$GOPATH/bin` is in your `$PATH` otherwise `invoke` cannot use any additional tool it might need. !!! note Versions of Golang that aren't an exact match to the version specified in our build images (see e.g. [here](https://github.com/DataDog/datadog-agent-buildimages/blob/c025473ee467ee6d884d532e4c12c7d982ce8fe1/circleci/Dockerfile#L43)) may not be able to build the agent and/or the [rtloader](https://github.com/DataDog/datadog-agent/tree/main/rtloader) binary properly. diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 0000000000000..e4807a8251de5 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,11 @@ +# Datadog Agent Example Configurations + +This is a collection of example `datadog-agent.yaml` files to get you started with Datadog. 
Consult the +[config_template](https://github.com/DataDog/datadog-agent/blob/main/pkg/config/config_template.yaml) for a full list of configuration options. + +To use these add your `api_key` and if necessary update the `site`. Add an `env` +tag to the `env:` key and any other required tags. If these parameters are set +with environment variables, they can be commented out. + +Add any other configuration settings needed, then you can copy the file to `/etc/datadog-agent/datadog.yaml` +for Linux systems or `%ProgramData%\Datadog\datadog.yaml` for Windows and restart the Datadog Agent. \ No newline at end of file diff --git a/examples/agent_apm.yaml b/examples/agent_apm.yaml new file mode 100644 index 0000000000000..df644494b8b76 --- /dev/null +++ b/examples/agent_apm.yaml @@ -0,0 +1,20 @@ +## Minimal configuration to enable Datadog to ship metrics and traces. + +## @env DD_API_KEY - string - required +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. +env: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. +tags: + - example:changeme +# - : + +apm_config: + enabled: true \ No newline at end of file diff --git a/examples/agent_apm_liveprocess.yaml b/examples/agent_apm_liveprocess.yaml new file mode 100644 index 0000000000000..b7c53dee72735 --- /dev/null +++ b/examples/agent_apm_liveprocess.yaml @@ -0,0 +1,26 @@ +## Minimal configuration to enable Datadog to ship metrics, traces and Live Processes. + +## @env DD_API_KEY - string - required +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. +env: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. +tags: + - example:changeme +# - : + +logs_enabled: true + +process_config: + process_collection: + enabled: true + +apm_config: + enabled: true \ No newline at end of file diff --git a/examples/agent_apm_logs.yaml b/examples/agent_apm_logs.yaml new file mode 100644 index 0000000000000..b264ca42fa1dd --- /dev/null +++ b/examples/agent_apm_logs.yaml @@ -0,0 +1,22 @@ +## Minimal configuration to enable Datadog to ship metrics, logs and traces. + +## @env DD_API_KEY - string - required +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. +env: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. 
+tags: + - example:changeme +# - : + +apm_config: + enabled: true + +logs_enabled: true \ No newline at end of file diff --git a/examples/agent_apm_logs_live_process.yaml b/examples/agent_apm_logs_live_process.yaml new file mode 100644 index 0000000000000..c1773a195577d --- /dev/null +++ b/examples/agent_apm_logs_live_process.yaml @@ -0,0 +1,26 @@ +## Minimal configuration to enable Datadog to ship metrics, logs, APM traces and live processes. + +## @env DD_API_KEY - string - required +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. +env: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. +tags: + - example:changeme +# - : + +apm_config: + enabled: true + +logs_enabled: true + +process_config: + process_collection: + enabled: true \ No newline at end of file diff --git a/examples/agent_liveprocess.yaml b/examples/agent_liveprocess.yaml new file mode 100644 index 0000000000000..abe7f780282e1 --- /dev/null +++ b/examples/agent_liveprocess.yaml @@ -0,0 +1,24 @@ +## Minimal configuration to enable Datadog to ship metrics and live processes. + +## @env DD_API_KEY - string - required +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. +env: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. +tags: + - example:changeme +# - : + +process_config: + process_collection: + enabled: true + +apm_config: + enabled: false \ No newline at end of file diff --git a/examples/agent_logs.yaml b/examples/agent_logs.yaml new file mode 100644 index 0000000000000..e632436f43ac7 --- /dev/null +++ b/examples/agent_logs.yaml @@ -0,0 +1,22 @@ +## Minimal configuration to enable Datadog to ship metrics and logs. + +## @env DD_API_KEY - string - required +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. +env: + +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. +tags: + - example:changeme +# - : + +logs_enabled: true + +apm_config: + enabled: false \ No newline at end of file diff --git a/examples/agent_logs_liveprocess.yaml b/examples/agent_logs_liveprocess.yaml new file mode 100644 index 0000000000000..87e9bdf502c12 --- /dev/null +++ b/examples/agent_logs_liveprocess.yaml @@ -0,0 +1,26 @@ +## Minimal configuration to enable Datadog to ship metrics, logs and live processes. + +## @env DD_API_KEY - string - required +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. 
+env: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. +tags: + - example:changeme +# - : + +logs_enabled: true + +process_config: + process_collection: + enabled: true + +apm_config: + enabled: false \ No newline at end of file diff --git a/examples/agent_minimal.yaml b/examples/agent_minimal.yaml new file mode 100644 index 0000000000000..4ae15e33bf5eb --- /dev/null +++ b/examples/agent_minimal.yaml @@ -0,0 +1,19 @@ +## Minimal configuration to enable Datadog to ship metrics only. + +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. +env: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. +tags: + - example:changeme +# - : + +apm_config: + enabled: false diff --git a/examples/agent_otel.yaml b/examples/agent_otel.yaml new file mode 100644 index 0000000000000..a19294ffc98a6 --- /dev/null +++ b/examples/agent_otel.yaml @@ -0,0 +1,27 @@ +## Minimal configuration to enable Datadog to ship metrics and enable OpenTelemetry. + +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. +env: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. +tags: + - example:changeme +# - : + +otlp_config: + receiver: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + +apm_config: + enabled: false diff --git a/flakes.yaml b/flakes.yaml index 6887638a60fc5..a3d6f71ed04a2 100644 --- a/flakes.yaml +++ b/flakes.yaml @@ -11,7 +11,6 @@ test/new-e2e/tests/containers: - TestECSSuite/TestCPU/metric___container.cpu.usage{^ecs_container_name:stress-ng$} - TestEKSSuite/TestCPU/metric___container.cpu.usage{^kube_deployment:stress-ng$,^kube_namespace:workload-cpustress$} - TestKindSuite/TestCPU/metric___container.cpu.usage{^kube_deployment:stress-ng$,^kube_namespace:workload-cpustress$} - - TestECSSuite test/new-e2e/tests/installer: - TestPackages/upgrade_scenario_ubuntu_22_04_x86_64/TestUpgradeSuccessful diff --git a/go.mod b/go.mod index b1f17ce865430..594373618ac6d 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/DataDog/datadog-agent -go 1.22.0 +go 1.22.5 // v0.8.0 was tagged long ago, and appared on pkg.go.dev. We do not want any tagged version // to appear there.
The trick to accomplish this is to make a new version (in this case v0.9.0) @@ -26,6 +26,7 @@ replace ( replace ( github.com/DataDog/datadog-agent/cmd/agent/common/path => ./cmd/agent/common/path/ github.com/DataDog/datadog-agent/comp/api/api/def => ./comp/api/api/def/ + github.com/DataDog/datadog-agent/comp/api/authtoken => ./comp/api/authtoken github.com/DataDog/datadog-agent/comp/core/config => ./comp/core/config/ github.com/DataDog/datadog-agent/comp/core/flare/builder => ./comp/core/flare/builder github.com/DataDog/datadog-agent/comp/core/flare/types => ./comp/core/flare/types @@ -47,8 +48,6 @@ replace ( github.com/DataDog/datadog-agent/comp/netflow/payload => ./comp/netflow/payload github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def => ./comp/otelcol/collector-contrib/def github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/impl => ./comp/otelcol/collector-contrib/impl - github.com/DataDog/datadog-agent/comp/otelcol/configstore/def => ./comp/otelcol/configstore/def - github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl => ./comp/otelcol/configstore/impl github.com/DataDog/datadog-agent/comp/otelcol/converter/def => ./comp/otelcol/converter/def github.com/DataDog/datadog-agent/comp/otelcol/converter/impl => ./comp/otelcol/converter/impl github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def => ./comp/otelcol/ddflareextension/def/ @@ -59,6 +58,7 @@ replace ( github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter => ./comp/otelcol/otlp/components/exporter/logsagentexporter github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter => ./comp/otelcol/otlp/components/exporter/serializerexporter github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient => ./comp/otelcol/otlp/components/metricsclient + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor => ./comp/otelcol/otlp/components/processor/infraattributesprocessor github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor => ./comp/otelcol/otlp/components/statsprocessor github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil => ./comp/otelcol/otlp/testutil github.com/DataDog/datadog-agent/comp/serializer/compression => ./comp/serializer/compression @@ -72,8 +72,11 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ./pkg/config/env github.com/DataDog/datadog-agent/pkg/config/mock => ./pkg/config/mock/ github.com/DataDog/datadog-agent/pkg/config/model => ./pkg/config/model/ + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ./pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/remote => ./pkg/config/remote/ github.com/DataDog/datadog-agent/pkg/config/setup => ./pkg/config/setup/ + github.com/DataDog/datadog-agent/pkg/config/structure => ./pkg/config/structure/ + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ./pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ./pkg/config/utils/ github.com/DataDog/datadog-agent/pkg/errors => ./pkg/errors github.com/DataDog/datadog-agent/pkg/gohai => ./pkg/gohai @@ -149,21 +152,21 @@ require ( github.com/DataDog/datadog-agent/pkg/security/secl v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/cgroups v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.2 - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.2 - 
github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.2 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 github.com/DataDog/datadog-go/v5 v5.5.0 github.com/DataDog/datadog-operator v1.8.0-rc.1 github.com/DataDog/ebpf-manager v0.7.1 github.com/DataDog/gopsutil v1.2.2 - github.com/DataDog/nikos v1.12.4 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0 + github.com/DataDog/nikos v1.12.5 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 github.com/DataDog/sketches-go v1.4.6 github.com/DataDog/viper v1.13.5 github.com/DataDog/watermarkpodautoscaler v0.6.1 - github.com/DataDog/zstd v1.5.5 + github.com/DataDog/zstd v1.5.6 github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f // indirect github.com/Masterminds/semver/v3 v3.3.0 github.com/Masterminds/sprig/v3 v3.3.0 // indirect @@ -190,7 +193,7 @@ require ( github.com/coreos/go-semver v0.3.1 github.com/coreos/go-systemd v22.5.0+incompatible github.com/cri-o/ocicni v0.4.3 - github.com/cyphar/filepath-securejoin v0.3.1 + github.com/cyphar/filepath-securejoin v0.3.2 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/docker/docker v25.0.6+incompatible github.com/docker/go-connections v0.5.0 @@ -269,7 +272,7 @@ require ( github.com/streadway/amqp v1.1.0 github.com/stretchr/testify v1.9.0 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 - github.com/tinylib/msgp v1.2.0 + github.com/tinylib/msgp v1.2.1 github.com/twmb/murmur3 v1.1.8 github.com/uptrace/bun v1.2.1 github.com/uptrace/bun/dialect/pgdialect v1.2.1 @@ -287,13 +290,13 @@ require ( go.opentelemetry.io/collector/component v0.104.0 go.opentelemetry.io/collector/confmap v0.104.0 go.opentelemetry.io/collector/exporter v0.104.0 - go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 + go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 go.opentelemetry.io/collector/pdata v1.11.0 go.opentelemetry.io/collector/processor/batchprocessor v0.104.0 go.opentelemetry.io/collector/receiver v0.104.0 go.opentelemetry.io/collector/receiver/otlpreceiver v0.104.0 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.3 go.uber.org/dig v1.18.0 @@ -301,17 +304,17 @@ require ( go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 go4.org/netipx v0.0.0-20220812043211-3cc044ffd68d - golang.org/x/arch v0.9.0 - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa - golang.org/x/net v0.28.0 + golang.org/x/arch v0.10.0 + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 + golang.org/x/net v0.29.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.24.0 - golang.org/x/text v0.17.0 + golang.org/x/sys v0.25.0 + golang.org/x/text v0.18.0 golang.org/x/time v0.6.0 - golang.org/x/tools v0.24.0 + golang.org/x/tools v0.25.0 golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 - google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 // indirect - 
google.golang.org/grpc v1.66.0 + google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.66.2 google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a google.golang.org/protobuf v1.34.2 gopkg.in/DataDog/dd-trace-go.v1 v1.67.0 @@ -338,10 +341,10 @@ require ( ) require ( - cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/compute/metadata v0.3.0 // indirect - cloud.google.com/go/iam v1.1.8 // indirect - cloud.google.com/go/storage v1.41.0 // indirect + cloud.google.com/go v0.115.1 // indirect + cloud.google.com/go/compute/metadata v0.5.0 // indirect + cloud.google.com/go/iam v1.2.0 // indirect + cloud.google.com/go/storage v1.43.0 // indirect code.cloudfoundry.org/cfhttp/v2 v2.0.0 // indirect code.cloudfoundry.org/clock v1.0.0 // indirect code.cloudfoundry.org/consuladapter v0.0.0-20200131002136-ac1daf48ba97 // indirect @@ -354,7 +357,7 @@ require ( github.com/BurntSushi/toml v1.3.2 // indirect github.com/DataDog/aptly v1.5.3 // indirect github.com/DataDog/extendeddaemonset v0.10.0-rc.4 // indirect - github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect + github.com/DataDog/go-tuf v1.1.0-0.5.2 github.com/DataDog/gostackparse v0.7.0 // indirect github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect github.com/DisposaBoy/JsonConfigReader v0.0.0-20201129172854-99cf318d67e7 // indirect @@ -428,7 +431,7 @@ require ( github.com/google/licenseclassifier/v2 v2.0.0 // indirect github.com/google/uuid v1.6.0 github.com/google/wire v0.6.0 // indirect - github.com/googleapis/gax-go/v2 v2.12.4 // indirect + github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -553,27 +556,27 @@ require ( go.etcd.io/etcd/client/v3 v3.6.0-alpha.0 // indirect go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20220522111935-c3bc4116dcd1 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector/consumer v0.104.0 + go.opentelemetry.io/collector/consumer v0.104.0 // indirect go.opentelemetry.io/collector/featuregate v1.11.0 - go.opentelemetry.io/collector/semconv v0.104.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/collector/semconv v0.104.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect - go.opentelemetry.io/otel v1.28.0 + go.opentelemetry.io/otel v1.29.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 + go.opentelemetry.io/otel/metric v1.29.0 // indirect go.opentelemetry.io/otel/sdk v1.28.0 go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 + go.opentelemetry.io/otel/trace v1.29.0 go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/crypto v0.26.0 // indirect - golang.org/x/mod v0.20.0 - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/term v0.23.0 // indirect + golang.org/x/crypto v0.27.0 // indirect + golang.org/x/mod v0.21.0 + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/term v0.24.0 // indirect gonum.org/v1/gonum v0.15.0 // indirect - 
google.golang.org/api v0.185.0 // indirect + google.golang.org/api v0.197.0 // indirect google.golang.org/appengine v1.6.8 // indirect gopkg.in/Knetic/govaluate.v3 v3.0.0 // indirect gopkg.in/cheggaaa/pb.v1 v1.0.28 // indirect @@ -596,43 +599,44 @@ require ( ) require ( + github.com/DataDog/datadog-agent/comp/api/authtoken v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.56.2 github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 github.com/containerd/containerd/api v1.7.19 github.com/containerd/errdefs v0.1.0 github.com/distribution/reference v0.6.0 github.com/jellydator/ttlcache/v3 v3.3.0 github.com/kouhin/envflag v0.0.0-20150818174321-0e9a86061649 github.com/lorenzosaino/go-sysctl v0.3.1 + go.opentelemetry.io/collector/config/configtelemetry v0.104.0 ) require ( github.com/DATA-DOG/go-sqlmock v1.5.2 - github.com/DataDog/agent-payload/v5 v5.0.130 + github.com/DataDog/agent-payload/v5 v5.0.132 github.com/DataDog/datadog-agent/cmd/agent/common/path v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/api/api/def v0.56.0-rc.3 - github.com/DataDog/datadog-agent/comp/core/config v0.56.0-rc.3 - github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.0-rc.3 + github.com/DataDog/datadog-agent/comp/core/config v0.56.2 + github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.2 github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3 - github.com/DataDog/datadog-agent/comp/core/log/def v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/core/log/def v0.58.0-devel github.com/DataDog/datadog-agent/comp/core/log/impl v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/comp/core/log/impl-trace v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/comp/core/log/mock v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.2 + github.com/DataDog/datadog-agent/comp/core/log/mock v0.58.0-devel + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/core/status/statusimpl v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/core/tagger/types v0.56.2 github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 - github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 + github.com/DataDog/datadog-agent/comp/def v0.56.2 github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/netflow/payload v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/impl v0.56.0-rc.3 - github.com/DataDog/datadog-agent/comp/otelcol/configstore/def v0.56.0-rc.3 - github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/converter/def v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/converter/impl v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline v0.56.0-rc.3 @@ -641,6 +645,7 @@ require ( 
github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient v0.56.0-rc.3 + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/serializer/compression v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/trace/agent/def v0.56.0-rc.3 @@ -648,14 +653,14 @@ require ( github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/trace/compression/impl-zstd v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/aggregator/ckey v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/api v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.2 - github.com/DataDog/datadog-agent/pkg/config/env v0.56.2 + github.com/DataDog/datadog-agent/pkg/api v0.56.2 + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel - github.com/DataDog/datadog-agent/pkg/config/model v0.56.2 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 github.com/DataDog/datadog-agent/pkg/config/remote v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.2 - github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 + github.com/DataDog/datadog-agent/pkg/config/utils v0.56.2 github.com/DataDog/datadog-agent/pkg/errors v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 @@ -684,26 +689,26 @@ require ( github.com/DataDog/datadog-agent/pkg/util/cache v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/containers/image v0.56.2 - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.2 - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.2 + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 github.com/DataDog/datadog-agent/pkg/util/flavor v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.2 github.com/DataDog/datadog-agent/pkg/util/grpc v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.2 + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/log/setup v1.0.0 - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.2 + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/system v0.56.2 + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 github.com/DataDog/datadog-agent/pkg/util/tagger v0.56.2 // indirect github.com/DataDog/datadog-agent/pkg/util/testutil v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/uuid v0.56.0-rc.3 - 
github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.2
-	github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3
+	github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0
+	github.com/DataDog/datadog-agent/pkg/version v0.57.0
	github.com/DataDog/go-libddwaf/v3 v3.3.0
-	github.com/DataDog/go-sqllexer v0.0.14
+	github.com/DataDog/go-sqllexer v0.0.15
	github.com/Datadog/dublin-traceroute v0.0.1
	github.com/aquasecurity/trivy v0.49.2-0.20240227072422-e1ea02c7b80d
	github.com/aws/aws-sdk-go-v2/service/kms v1.34.1
@@ -724,7 +729,7 @@ require (
	github.com/judwhite/go-svc v1.2.1
	github.com/kr/pretty v0.3.1
	// todo: update datadog connector with breaking changes from https://github.com/DataDog/datadog-agent/pull/26347.
-	github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0
+	github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.104.0
	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10
	github.com/prometheus-community/pro-bing v0.4.1
	github.com/rickar/props v1.0.0
@@ -749,8 +754,8 @@ require (
)

require (
-	cloud.google.com/go/auth v0.5.1 // indirect
-	cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
+	cloud.google.com/go/auth v0.9.3 // indirect
+	cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
	code.cloudfoundry.org/go-diodes v0.0.0-20240604201846-c756bfed2ed3 // indirect
	code.cloudfoundry.org/go-loggregator v7.4.0+incompatible // indirect
	code.cloudfoundry.org/rfc5424 v0.0.0-20201103192249-000122071b78 // indirect
@@ -765,16 +770,18 @@ require (
	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect
	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
	github.com/Code-Hex/go-generics-cache v1.3.1 // indirect
-	github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.0-rc.3 // indirect
+	github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.2 // indirect
	github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor v0.0.0-20240525065430-d0b647bcb646 // indirect
+	github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect
+	github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect
	github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3 // indirect
	github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect
	github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.2 // indirect
+	github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect
	github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect
	github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect
-	github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 // indirect
-	github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.17.0 // indirect
+	github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 // indirect
+	github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 // indirect
	github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.0 // indirect
	github.com/Intevation/gval v1.3.0 // indirect
	github.com/Intevation/jsonpath v0.2.1 // indirect
@@ -790,7 +797,6 @@ require (
	github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.4 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect
	github.com/bahlo/generic-list-go v0.2.0 // indirect
-	github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
	github.com/bitnami/go-version v0.0.0-20231130084017-bb00604d650c // indirect
	github.com/blang/semver/v4 v4.0.0 // indirect
	github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect
@@ -820,7 +826,7 @@ require (
	github.com/go-openapi/spec v0.20.14 // indirect
	github.com/go-resty/resty/v2 v2.12.0 // indirect
	github.com/go-test/deep v1.1.0 // indirect
-	github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect
+	github.com/go-viper/mapstructure/v2 v2.1.0 // indirect
	github.com/go-zookeeper/zk v1.0.3 // indirect
	github.com/goccy/go-json v0.10.3 // indirect
	github.com/goccy/go-yaml v1.11.0 // indirect
@@ -831,16 +837,14 @@ require (
	github.com/google/flatbuffers v24.3.25+incompatible // indirect
	github.com/google/gnostic-models v0.6.8 // indirect
	github.com/google/go-querystring v1.1.0 // indirect
-	github.com/google/s2a-go v0.1.7 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
+	github.com/google/s2a-go v0.1.8 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
	github.com/gophercloud/gophercloud v1.8.0 // indirect
	github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 // indirect
	github.com/gorilla/websocket v1.5.1 // indirect
	github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
	github.com/hashicorp/cronexpr v1.1.2 // indirect
-	github.com/hashicorp/go-getter v1.7.5 // indirect
	github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
-	github.com/hashicorp/go-safetemp v1.0.0 // indirect
	github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect
	github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
	github.com/hashicorp/go-sockaddr v1.0.6 // indirect
@@ -866,7 +870,6 @@ require (
	github.com/lunixbochs/struc v0.0.0-20200707160740-784aaebc1d40 // indirect
	github.com/mattn/go-shellwords v1.0.12 // indirect
	github.com/microsoft/go-rustaudit v0.0.0-20220808201409-204dfee52032 // indirect
-	github.com/mitchellh/go-testing-interface v1.14.1 // indirect
	github.com/moby/spdystream v0.2.0 // indirect
	github.com/moby/sys/sequential v0.5.0 // indirect
	github.com/moby/sys/user v0.3.0 // indirect
@@ -966,18 +969,14 @@ require (
	go.opentelemetry.io/collector/config/confignet v0.104.0 // indirect
	go.opentelemetry.io/collector/config/configopaque v1.11.0 // indirect
	go.opentelemetry.io/collector/config/configretry v1.11.0 // indirect
-	go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect
	go.opentelemetry.io/collector/config/configtls v0.104.0 // indirect
	go.opentelemetry.io/collector/config/internal v0.104.0 // indirect
	go.opentelemetry.io/collector/connector v0.104.0 // indirect
-	go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 // indirect
	go.opentelemetry.io/collector/exporter/nopexporter v0.104.0 // indirect
	go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 // indirect
	go.opentelemetry.io/collector/extension/auth v0.104.0 // indirect
	go.opentelemetry.io/collector/extension/zpagesextension v0.104.0 // indirect
	go.opentelemetry.io/collector/filter v0.104.0 // indirect
-	go.opentelemetry.io/collector/pdata/pprofile v0.104.0 // indirect
-	go.opentelemetry.io/collector/pdata/testdata v0.104.0 // indirect
	go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.104.0 // indirect
	go.opentelemetry.io/collector/receiver/nopreceiver v0.104.0 // indirect
	go.opentelemetry.io/contrib/config v0.7.0 // indirect
@@ -991,8 +990,8 @@ require (
	golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 // indirect
	golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gopkg.in/warnings.v0 v0.1.2 // indirect
	gotest.tools/v3 v3.5.1 // indirect
@@ -1016,11 +1015,13 @@ replace github.com/vishvananda/netlink => github.com/DataDog/netlink v1.0.1-0.20
// Cannot be upgraded to 0.26 without lossing CRI API v1alpha2
replace k8s.io/cri-api => k8s.io/cri-api v0.25.5

+replace github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector => github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0
+
// Use custom Trivy fork to reduce binary size
// Pull in replacements needed by upstream Trivy
replace (
	// Maps to Trivy fork https://github.com/DataDog/trivy/commits/use-fs-main-dd/
-	github.com/aquasecurity/trivy => github.com/DataDog/trivy v0.0.0-20240729123106-0d652d4a5630
+	github.com/aquasecurity/trivy => github.com/DataDog/trivy v0.0.0-20240925151901-6736b8661190
	github.com/saracen/walker => github.com/DataDog/walker v0.0.0-20230418153152-7f29bb2dc950
	// testcontainers-go has a bug with versions v0.25.0 and v0.26.0
	// ref: https://github.com/testcontainers/testcontainers-go/issues/1782
diff --git a/go.sum b/go.sum
index a131d6f6b7cd3..6d404eaeecda8 100644
--- a/go.sum
+++ b/go.sum
@@ -37,8 +37,8 @@ cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFO
cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I=
cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw=
-cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14=
-cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU=
+cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ=
+cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc=
cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4=
cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw=
cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E=
@@ -100,10 +100,10 @@ cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVo
cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo=
cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E=
-cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw=
-cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s=
-cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
-cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
+cloud.google.com/go/auth
v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U= +cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk= +cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= @@ -184,8 +184,8 @@ cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= @@ -320,8 +320,8 @@ cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGE cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= -cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= +cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8= +cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= @@ -354,6 +354,8 @@ cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeN cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI= +cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= @@ -540,8 +542,8 @@ 
cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeL cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= -cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0= -cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80= +cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= +cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= @@ -685,8 +687,8 @@ github.com/CycloneDX/cyclonedx-go v0.8.0 h1:FyWVj6x6hoJrui5uRQdYZcSievw3Z32Z88uY github.com/CycloneDX/cyclonedx-go v0.8.0/go.mod h1:K2bA+324+Og0X84fA8HhN2X066K7Bxz4rpMQ4ZhjtSk= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= -github.com/DataDog/agent-payload/v5 v5.0.130 h1:pVMRVKkUMmw2vOpmP92TO9jrS0om3K0uKteXHcy/6v0= -github.com/DataDog/agent-payload/v5 v5.0.130/go.mod h1:FgVQKmVdqdmZTbxIptqJC/l+xEzdiXsaAOs/vGAvWzs= +github.com/DataDog/agent-payload/v5 v5.0.132 h1:F9wy+iyAgN2QmkEsOlPp3RrQ4vOb4T6k3BXhjSpELS4= +github.com/DataDog/agent-payload/v5 v5.0.132/go.mod h1:FgVQKmVdqdmZTbxIptqJC/l+xEzdiXsaAOs/vGAvWzs= github.com/DataDog/appsec-internal-go v1.7.0 h1:iKRNLih83dJeVya3IoUfK+6HLD/hQsIbyBlfvLmAeb0= github.com/DataDog/appsec-internal-go v1.7.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g= github.com/DataDog/aptly v1.5.3 h1:oLsRvjuXSVM4ia0N83dU3KiQeiJ6BaszYbTZOkSfDlw= @@ -712,8 +714,8 @@ github.com/DataDog/go-grpc-bidirectional-streaming-example v0.0.0-20221024060302 github.com/DataDog/go-grpc-bidirectional-streaming-example v0.0.0-20221024060302-b9cf785c02fe/go.mod h1:90sqV0j7E8wYCyqIp5d9HmYWLTFQttqPFFtNYDyAybQ= github.com/DataDog/go-libddwaf/v3 v3.3.0 h1:jS72fuQpFgJZEdEJDmHJCPAgNTEMZoz1EUvimPUOiJ4= github.com/DataDog/go-libddwaf/v3 v3.3.0/go.mod h1:Bz/0JkpGf689mzbUjKJeheJINqsyyhM8p9PDuHdK2Ec= -github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q= -github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.15 h1:rUUu52dP8EQhJLnUw0MIAxZp0BQx2fOTuMztr3vtHUU= +github.com/DataDog/go-sqllexer v0.0.15/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee h1:tXibLZk3G6HncIFJKaNItsdzcrk4YqILNDZlXPTNt4k= @@ -728,32 +730,32 @@ github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYx github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= github.com/DataDog/netlink v1.0.1-0.20240223195320-c7a4f832a3d1 h1:HnvrdC79xJ+RPxTQdhDDwxblTNWhJUKeyTPsuyaOnxQ= github.com/DataDog/netlink v1.0.1-0.20240223195320-c7a4f832a3d1/go.mod 
h1:whJevzBpTrid75eZy99s3DqCmy05NfibNaF2Ol5Ox5A= -github.com/DataDog/nikos v1.12.4 h1:UBo2v1Std4GvPHalKs22+1kcM4tXvMACREe5k4QMzf0= -github.com/DataDog/nikos v1.12.4/go.mod h1:ovDmd9Jz/ZJwP39wgQmFUXZZzeDYJtFgsY7K1OPstWk= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 h1:weAPKDi/dTlBbWU4oDZ55ubomqUob6OWPoUcdBjWM2M= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0/go.mod h1:VrcmO2+HTWXaGYin1pAAXWNEtaza/DCJDH/+t5IY5rs= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.18.0 h1:KNiq6ofE5BBMQjl7w9fftg8z44C9z51w7qOWIKs5SCg= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.18.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 h1:FaUFQE8IuaNdpOQGIhoy2h58v8AVND+yZG3gVqKAwLQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.17.0 h1:K6SIJy7ECWdQMWJMo60oJNvduOeIJ/t/6VDbHWDd/oM= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.17.0/go.mod h1:L7QrJ1emQ+rcXAs2JW5b+eu72G9A4yku35Ia4kLrdcg= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0 h1:Fija8Qo0z/HngskYyBpMqmJKM2ejNr1NfXUyWszFDAw= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0/go.mod h1:lNu6vfFNCV/tyWxs8x8nCN1TqK+bPeI2dbnlwFTs8VA= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0 h1:x6re32f8gQ8fdCllywQyAbxQuXNrgxeimpLBfvwA97g= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0/go.mod h1:R84ZVbxKSgMxzvJro/MftVrlkGm2C2gndUhV35wyR8A= +github.com/DataDog/nikos v1.12.5 h1:O4kdkUkL0nws9+rdeLKQLIN5+lC542eKeUY1C5utQBA= +github.com/DataDog/nikos v1.12.5/go.mod h1:2KZ5BIt/7gYYg/hI8F1U+Urq+LfeZOeDbFnrrBmoYVg= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 h1:jdsuH8u4rxfvy3ZHoSLk5NAZrQMNZqyJwhM15FpEswE= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0/go.mod h1:KI5I5JhJNOQWeE4vs+qk+BY/9PVSDwNmSjrCUrmuZKw= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0 h1:e4XT2+v4vgZBCbp5JUbe0Z+PRegh+nsLMp4X+esht9E= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 h1:JLpKc1QpkaUXEFgN68/Q9XgF0XgbVl/IXd8S1KUcEV4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0/go.mod h1:VJtgUHCz38obs58oEjNjEre6IaHmR+s7o4DvX74knq4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 h1:b60rxWT/EwcSA4l/zXfqTZp3udXJ1fKtz7+Qwat8OjQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0/go.mod h1:6jM34grB+zhMrzWgM0V8B6vyIJ/75oAfjcx/mJWv6cE= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 h1:0OFAPO964qsj6BzKs/hbAkpO/IIHp7vN1klKrohzULA= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0/go.mod h1:IDaKpBfDtw8eWBLtXR14HB5dsXDxS4VRUR0OL5rlRT8= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= -github.com/DataDog/trivy 
v0.0.0-20240729123106-0d652d4a5630 h1:GA5L0gJsKLmuzGRGOqC3koKod7fmSL6C8GEDrBceJBI= -github.com/DataDog/trivy v0.0.0-20240729123106-0d652d4a5630/go.mod h1:xmc7xCb5KSg2mFbztyInH8ciotVbad9SOmGFClgD0cU= +github.com/DataDog/trivy v0.0.0-20240925151901-6736b8661190 h1:JDfYwI7ELSnWDgn0hj7afbByrLbFtztkvgQf69H+fv4= +github.com/DataDog/trivy v0.0.0-20240925151901-6736b8661190/go.mod h1:hLiUAm3v175M5jWbq34TdGmX6mvHIJY7FMuZ3wBugtw= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= github.com/DataDog/walker v0.0.0-20230418153152-7f29bb2dc950 h1:2imDajw3V85w1iqHsuXN+hUBZQVF+r9eME8tsPq/HpA= github.com/DataDog/walker v0.0.0-20230418153152-7f29bb2dc950/go.mod h1:FU+7qU8DeQQgSZDmmThMJi93kPkLFgy0oVAcLxurjIk= github.com/DataDog/watermarkpodautoscaler v0.6.1 h1:KEj10Cm8wO/36lEOgqjgDfIMMpMPReY/+bDacWe7Adw= github.com/DataDog/watermarkpodautoscaler v0.6.1/go.mod h1:iaEXqganxe2zHi3pyJFuf59X8srmGFoikPtjWxMW9mU= -github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= -github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= +github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f/go.mod h1:oXfOhM/Kr8OvqS6tVqJwxPBornV0yrx3bc+l0BDr7PQ= github.com/Datadog/dublin-traceroute v0.0.1 h1:xh5xfA25gjrpRK72lQotL79S4vAvxpc4UOQdR22p2IY= @@ -886,7 +888,6 @@ github.com/awalterschulze/gographviz v2.0.3+incompatible/go.mod h1:GEV5wmg4YquNw github.com/aws/aws-lambda-go v1.37.0 h1:WXkQ/xhIcXZZ2P5ZBEw+bbAKeCEcb5NtiYpSwVVzIXg= github.com/aws/aws-lambda-go v1.37.0/go.mod h1:jwFe2KmMsHmffA1X2R09hH6lFzJQxzI8qK17ewzbQMM= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.53.11 h1:KcmduYvX15rRqt4ZU/7jKkmDxU/G87LJ9MUI0yQJh00= github.com/aws/aws-sdk-go v1.53.11/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -970,8 +971,6 @@ github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= -github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bhmj/jsonslice v0.0.0-20200323023432-92c3edaad8e2 h1:11xwzvvHBTyUMCD7infJV2SXSaVyp9ZXK9QgfV6Jfss= github.com/bhmj/jsonslice v0.0.0-20200323023432-92c3edaad8e2/go.mod h1:blvNODZOz8uOvDJzGiXzoi8QlzcAhA57sMnKx1D18/k= @@ -1017,7 +1016,6 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheggaaa/pb v1.0.10/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= -github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo= github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30= github.com/cheggaaa/pb/v3 v3.1.4 h1:DN8j4TVVdKu3WxVwcRKu0sG00IIU6FewoABZzXbRQeo= @@ -1120,8 +1118,8 @@ github.com/cri-o/ocicni v0.4.3 h1:BfnrZrtr/F+o+b+yOguB1o6I4OzjieF3k3dN4MrsCJA= github.com/cri-o/ocicni v0.4.3/go.mod h1:RzIKSln5AT65hyyfGj3/gsfCpjiY1Y6rVK51Uc5YNzk= github.com/csaf-poc/csaf_distribution/v3 v3.0.0 h1:ob9+Fmpff0YWgTP3dYaw7G2hKQ9cegh9l3zksc+q3sM= github.com/csaf-poc/csaf_distribution/v3 v3.0.0/go.mod h1:uilCTiNKivq+6zrDvjtZaUeLk70oe21iwKivo6ILwlQ= -github.com/cyphar/filepath-securejoin v0.3.1 h1:1V7cHiaW+C+39wEfpH6XlLBQo3j/PciWFrgfCLS8XrE= -github.com/cyphar/filepath-securejoin v0.3.1/go.mod h1:F7i41x/9cBF7lzCrVsYs9fuzwRZm4NQsGTBdpp6mETc= +github.com/cyphar/filepath-securejoin v0.3.2 h1:QhZu5AxQ+o1XZH0Ye05YzvJ0kAdK6VQc0z9NNMek7gc= +github.com/cyphar/filepath-securejoin v0.3.2/go.mod h1:F7i41x/9cBF7lzCrVsYs9fuzwRZm4NQsGTBdpp6mETc= github.com/datadog/trivy-db v0.0.0-20240228172000-42caffdaee3f h1:IFB3J+f0m2e7nZjPTqvzLrrb6dVU6BQrsGx/7Tmm8Xk= github.com/datadog/trivy-db v0.0.0-20240228172000-42caffdaee3f/go.mod h1:cj9/QmD9N3OZnKQMp+/DvdV+ym3HyIkd4e+F0ZM3ZGs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -1364,8 +1362,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZ github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w= +github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= @@ -1522,8 +1520,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -1538,8 +1536,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -1554,8 +1552,8 @@ github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38 github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw= github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= -github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= -github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= @@ -1613,8 +1611,6 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-getter v1.7.5 h1:dT58k9hQ/vbxNMwoI5+xFYAJuv6152UNvdHokfI5wE4= -github.com/hashicorp/go-getter v1.7.5/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= @@ -1639,8 +1635,6 @@ github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= 
-github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= -github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= @@ -1656,7 +1650,6 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= @@ -1800,7 +1793,6 @@ github.com/kjk/lzma v0.0.0-20161016003348-3fd93898850d/go.mod h1:phT/jsRPBAEqjAi github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= @@ -1901,7 +1893,6 @@ github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxec github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= @@ -1913,7 +1904,6 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= @@ -2496,8 +2486,8 @@ github.com/tidwall/wal v1.1.7 
h1:emc1TRjIVsdKKSnpwGBAcsAGg0767SvUk8+ygx7Bb+4= github.com/tidwall/wal v1.1.7/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E= github.com/tilinna/clock v1.1.0 h1:6IQQQCo6KoBxVudv6gwtY8o4eDfhHo8ojA5dP0MfhSs= github.com/tilinna/clock v1.1.0/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= -github.com/tinylib/msgp v1.2.0 h1:0uKB/662twsVBpYUPbokj4sTSKhWFKB7LopO2kWK8lY= -github.com/tinylib/msgp v1.2.0/go.mod h1:2vIGs3lcUo8izAATNobrCHevYZC/LMsJtw4JPiYPHro= +github.com/tinylib/msgp v1.2.1 h1:6ypy2qcCznxpP4hpORzhtXyTqrBs7cfM9MCCWY8zsmU= +github.com/tinylib/msgp v1.2.1/go.mod h1:2vIGs3lcUo8izAATNobrCHevYZC/LMsJtw4JPiYPHro= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= @@ -2524,7 +2514,6 @@ github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLY github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= -github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/uptrace/bun v1.2.1 h1:2ENAcfeCfaY5+2e7z5pXrzFKy3vS8VXvkCag6N2Yzfk= @@ -2695,8 +2684,6 @@ go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBL go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ= go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 h1:1Z63H/xxv6IzMP7GPmI6v/lQAqZwYZCVC0rWYcYOomw= go.opentelemetry.io/collector/exporter/debugexporter v0.104.0/go.mod h1:NHVzTM0Z/bomgR7SAe3ysx4CZzh2UJ3TXWSCnaOB1Wo= -go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 h1:MaBTuHmK/HAQ+/rLTrGf3tazKum8Sic3/CaXgNr5xnc= -go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0/go.mod h1:sXZhACvds6z71cf2fzKrojMgdJItJZxeClKlF/PI/l8= go.opentelemetry.io/collector/exporter/nopexporter v0.104.0 h1:33JeCQiJbvhSXFqQ34R4ole/wD4iHtF5LYp2GziYVnY= go.opentelemetry.io/collector/exporter/nopexporter v0.104.0/go.mod h1:73afhI8uc5NKAl9pMJlgQQ46Ck9e7nQ2zZGXHHSzuwo= go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 h1:EFOdhnc2yGhqou0Tud1HsM7fsgWo/H3tdQhYYytDprQ= @@ -2741,16 +2728,16 @@ go.opentelemetry.io/collector/service v0.104.0 h1:DTpkoX4C6qiA3v3cfB2cHv/cH705o5 go.opentelemetry.io/collector/service v0.104.0/go.mod h1:eq68zgpqRDYaVp60NeRu973J0rA5vZJkezfw/EzxLXc= go.opentelemetry.io/contrib/config v0.7.0 h1:b1rK5tGTuhhPirJiMxOcyQfZs76j2VapY6ODn3b2Dbs= go.opentelemetry.io/contrib/config v0.7.0/go.mod h1:8tdiFd8N5etOi3XzBmAoMxplEzI3TcL8dU5rM5/xcOQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= 
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0= go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E= go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E= go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs= go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 h1:bFgvUr3/O4PHj3VQcFEuYKvRZJX1SJDQ+11JXuSB3/w= @@ -2769,14 +2756,14 @@ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -2821,8 +2808,8 @@ go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2/go.mod h1: go4.org/unsafe/assume-no-moving-gc 
v0.0.0-20231121144256-b99613f794b6 h1:lGdhQUN/cnWdSH3291CUuxSEqc+AsGTiDxPP3r2J0l4= go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= golang.org/x/arch v0.0.0-20190927153633-4e8777c89be4/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= -golang.org/x/arch v0.9.0 h1:ub9TgUInamJ8mrZIGlBG6/4TqWeMszd4N8lNorbrr6k= -golang.org/x/arch v0.9.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/arch v0.10.0 h1:S3huipmSclq3PJMNe76NGwkBR504WFkQ5dhzWzP8ZW8= +golang.org/x/arch v0.10.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -2851,8 +2838,8 @@ golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2868,8 +2855,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 h1:jWGQJV4niP+CCmFW9ekjA9Zx8vYORzOUH2/Nl5WPuLQ= golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= @@ -2921,8 +2908,8 @@ golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= 
-golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -3007,8 +2994,8 @@ golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -3035,7 +3022,6 @@ golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= @@ -3043,8 +3029,8 @@ golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4 golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -3193,8 +3179,8 @@ golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -3211,8 +3197,8 @@ golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -3232,8 +3218,8 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -3325,8 +3311,8 @@ golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -3410,8 +3396,8 @@ google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjY google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= -google.golang.org/api v0.185.0 h1:ENEKk1k4jW8SmmaT6RE+ZasxmxezCrD5Vw4npvr+pAU= -google.golang.org/api v0.185.0/go.mod h1:HNfvIkJGlgrIlrbYkAm9W9IdkmKZjOTVh33YltygGbg= +google.golang.org/api v0.197.0 h1:x6CwqQLsFiA5JKAiGyGBjc2bNtHtLddhJCE2IKuhhcQ= +google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -3530,7 +3516,6 @@ google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= @@ -3561,21 +3546,21 @@ google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= -google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 h1:CUiCqkPw1nNrNQzCCG4WA65m0nAmQiwXHpub3dNyruU= -google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4/go.mod h1:EvuUDCulqGgV80RvP1BHuom+smhX4qtlhnNatHuroGQ= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api 
v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= +google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= +google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -3622,8 +3607,8 @@ google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= -google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= -google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a h1:p51n6zkL483uumoZhCSGtHCem9kDeU05G5jX/wYI9gw= google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a/go.mod h1:gxndsbNG1n4TZcHGgsYEfVGnTxqfEdfiDv6/DADXX9o= @@ -3661,7 +3646,6 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= 
gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
diff --git a/omnibus/config/patches/datadog-agent-integrations-py3/remove-maxfile-maxpath-psutil.patch b/omnibus/config/patches/datadog-agent-integrations-py3/remove-maxfile-maxpath-psutil.patch
deleted file mode 100644
index c92eb2f7f2859..0000000000000
--- a/omnibus/config/patches/datadog-agent-integrations-py3/remove-maxfile-maxpath-psutil.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-Partially reverts https://github.com/giampaolo/psutil/pull/1863 to remove the maxpath / maxfile fetch
-diff --git a/psutil/__init__.py b/psutil/__init__.py
-index 1a113bc3..ce962a61 100644
---- a/psutil/__init__.py
-+++ b/psutil/__init__.py
-@@ -2012,23 +2012,7 @@ def disk_partitions(all=False):
-     If *all* parameter is False return physical devices only and ignore
-     all others.
-     """
--    def pathconf(path, name):
--        try:
--            return os.pathconf(path, name)
--        except (OSError, AttributeError):
--            pass
--
--    ret = _psplatform.disk_partitions(all)
--    if POSIX:
--        new = []
--        for item in ret:
--            nt = item._replace(
--                maxfile=pathconf(item.mountpoint, 'PC_NAME_MAX'),
--                maxpath=pathconf(item.mountpoint, 'PC_PATH_MAX'))
--            new.append(nt)
--        return new
--    else:
--        return ret
-+    return _psplatform.disk_partitions(all)
-
-
- def disk_io_counters(perdisk=False, nowrap=True):
\ No newline at end of file
diff --git a/omnibus/config/patches/libgcrypt/0001-disable-tests-building.patch b/omnibus/config/patches/libgcrypt/0001-disable-tests-building.patch
new file mode 100644
index 0000000000000..e1fa865b3c5c7
--- /dev/null
+++ b/omnibus/config/patches/libgcrypt/0001-disable-tests-building.patch
@@ -0,0 +1,27 @@
+From 738666ffbe14d735fe5445f41d23aae4c9c6b3b8 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Hugo=20Beauz=C3=A9e-Luyssen?=
+Date: Thu, 1 Aug 2024 11:59:24 +0200
+Subject: [PATCH] disable tests building
+
+---
+ Makefile.am | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/Makefile.am b/Makefile.am
+index d60804ee..980d6b84 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -41,8 +41,8 @@ doc =
+ endif
+
+
+-DIST_SUBDIRS = m4 compat mpi cipher random src doc tests
+-SUBDIRS = compat mpi cipher random src $(doc) tests
++DIST_SUBDIRS = m4 compat mpi cipher random src
++SUBDIRS = compat mpi cipher random src
+
+ EXTRA_DIST = autogen.sh autogen.rc README.GIT LICENSES \
+ 	ChangeLog-2011 build-aux/ChangeLog-2011 doc/ChangeLog-2011 \
+--
+2.34.1
+
diff --git a/omnibus/config/patches/python2/0001-disable-multiarch.patch b/omnibus/config/patches/python2/0001-disable-multiarch.patch
new file mode 100644
index 0000000000000..eacb000306d58
--- /dev/null
+++ b/omnibus/config/patches/python2/0001-disable-multiarch.patch
@@ -0,0 +1,10 @@
+--- a/setup.py	2024-08-02 16:54:59.662444071 +0200
++++ b/setup.py	2024-08-02 16:55:29.130442616 +0200
+@@ -506,7 +506,6 @@
+         add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
+         if cross_compiling:
+             self.add_gcc_paths()
+-            self.add_multiarch_paths()
+
+         # Add paths specified in the environment variables LDFLAGS and
+         # CPPFLAGS for header and library files.
diff --git a/omnibus/config/projects/agent-binaries.rb b/omnibus/config/projects/agent-binaries.rb
index d401fffcf1364..bd9e9c790a975 100644
--- a/omnibus/config/projects/agent-binaries.rb
+++ b/omnibus/config/projects/agent-binaries.rb
@@ -20,7 +20,7 @@
  install_dir "C:/opt/datadog-agent/"
  maintainer 'Datadog Inc.' # Windows doesn't want our e-mail address :(
else
-  install_dir '/opt/datadog-agent'
+  install_dir ENV["INSTALL_DIR"] || '/opt/datadog-agent'
  maintainer 'Datadog Packages '
end
diff --git a/omnibus/config/projects/agent.rb b/omnibus/config/projects/agent.rb
index c5067c017b312..c2421a99bffb6 100644
--- a/omnibus/config/projects/agent.rb
+++ b/omnibus/config/projects/agent.rb
@@ -4,6 +4,7 @@
# Copyright 2016-present Datadog, Inc.
require "./lib/ostools.rb"
flavor = ENV['AGENT_FLAVOR']
+output_config_dir = ENV["OUTPUT_CONFIG_DIR"]

if flavor.nil? || flavor == 'base'
  name 'agent'
@@ -280,7 +281,7 @@
end

if linux_target?
-  extra_package_file '/etc/datadog-agent/'
+  extra_package_file "#{output_config_dir}/etc/datadog-agent/"
  extra_package_file '/usr/bin/dd-agent'
  extra_package_file '/var/log/datadog/'
end
diff --git a/omnibus/config/projects/dogstatsd.rb b/omnibus/config/projects/dogstatsd.rb
index 76800d8018b3b..43b5adadd94ad 100644
--- a/omnibus/config/projects/dogstatsd.rb
+++ b/omnibus/config/projects/dogstatsd.rb
@@ -29,7 +29,7 @@
  install_dir "C:/opt/datadog-dogstatsd/"
  maintainer 'Datadog Inc.' # Windows doesn't want our e-mail address :(
else
-  install_dir '/opt/datadog-dogstatsd'
+  install_dir ENV["INSTALL_DIR"] || '/opt/datadog-dogstatsd'

  if redhat_target? || suse_target?
    maintainer 'Datadog, Inc '
diff --git a/omnibus/config/projects/iot-agent.rb b/omnibus/config/projects/iot-agent.rb
index f2e35368aab95..c46ef298d0483 100644
--- a/omnibus/config/projects/iot-agent.rb
+++ b/omnibus/config/projects/iot-agent.rb
@@ -20,7 +20,7 @@
  install_dir "C:/opt/datadog-agent/"
  maintainer 'Datadog Inc.' # Windows doesn't want our e-mail address :(
else
-  install_dir '/opt/datadog-agent'
+  install_dir ENV["INSTALL_DIR"] || '/opt/datadog-agent'

  if redhat_target? || suse_target?
    maintainer 'Datadog, Inc '
@@ -65,6 +65,8 @@
  # creates required build directories
  dependency 'preparation'

+  dependency "systemd" if linux_target?
+
  # Datadog agent
  dependency 'datadog-iot-agent'
diff --git a/omnibus/config/software/datadog-agent-dependencies.rb b/omnibus/config/software/datadog-agent-dependencies.rb
index c428552813d89..fe4a6332d361f 100644
--- a/omnibus/config/software/datadog-agent-dependencies.rb
+++ b/omnibus/config/software/datadog-agent-dependencies.rb
@@ -13,3 +13,5 @@ if with_python_runtime? "3"
  dependency 'datadog-agent-integrations-py3-dependencies'
end
+
+dependency "systemd" if linux_target?
diff --git a/omnibus/config/software/datadog-agent-finalize.rb b/omnibus/config/software/datadog-agent-finalize.rb
index b6df0ab46e821..da54ee837006f 100644
--- a/omnibus/config/software/datadog-agent-finalize.rb
+++ b/omnibus/config/software/datadog-agent-finalize.rb
@@ -14,11 +14,13 @@
skip_transitive_dependency_licensing true

+always_build true

build do
  license :project_license
+  output_config_dir = ENV["OUTPUT_CONFIG_DIR"]
  flavor_arg = ENV['AGENT_FLAVOR']
  # TODO too many things done here, should be split
  block do
@@ -52,6 +54,9 @@
      # load isn't supported by windows
      delete "#{conf_dir}/load.d"
+      # service_discovery isn't supported by windows
+      delete "#{conf_dir}/service_discovery.d"
+
      # Remove .pyc files from embedded Python
      command "del /q /s #{windows_safe_path(install_dir)}\\*.pyc"
    end
@@ -78,7 +83,7 @@
      # Used in https://docs.datadoghq.com/agent/guide/python-3/
      delete "#{install_dir}/embedded/bin/2to3"
-      link "#{install_dir}/embedded/bin/2to3-3.11", "#{install_dir}/embedded/bin/2to3"
+      link "#{install_dir}/embedded/bin/2to3-3.12", "#{install_dir}/embedded/bin/2to3"
    end

    delete "#{install_dir}/embedded/lib/config_guess"
@@ -92,37 +97,37 @@
    if linux_target?
      # Move configuration files
-      mkdir "/etc/datadog-agent"
+      mkdir "#{output_config_dir}/etc/datadog-agent"
      move "#{install_dir}/bin/agent/dd-agent", "/usr/bin/dd-agent"
-      move "#{install_dir}/etc/datadog-agent/datadog.yaml.example", "/etc/datadog-agent"
-      move "#{install_dir}/etc/datadog-agent/conf.d", "/etc/datadog-agent", :force=>true
+      move "#{install_dir}/etc/datadog-agent/datadog.yaml.example", "#{output_config_dir}/etc/datadog-agent"
+      move "#{install_dir}/etc/datadog-agent/conf.d", "#{output_config_dir}/etc/datadog-agent", :force=>true

      unless heroku_target?
-        move "#{install_dir}/etc/datadog-agent/system-probe.yaml.example", "/etc/datadog-agent"
-        move "#{install_dir}/etc/datadog-agent/security-agent.yaml.example", "/etc/datadog-agent", :force=>true
-        move "#{install_dir}/etc/datadog-agent/runtime-security.d", "/etc/datadog-agent", :force=>true
-        move "#{install_dir}/etc/datadog-agent/compliance.d", "/etc/datadog-agent"
+        move "#{install_dir}/etc/datadog-agent/system-probe.yaml.example", "#{output_config_dir}/etc/datadog-agent"
+        move "#{install_dir}/etc/datadog-agent/security-agent.yaml.example", "#{output_config_dir}/etc/datadog-agent", :force=>true
+        move "#{install_dir}/etc/datadog-agent/runtime-security.d", "#{output_config_dir}/etc/datadog-agent", :force=>true
+        move "#{install_dir}/etc/datadog-agent/compliance.d", "#{output_config_dir}/etc/datadog-agent"

        # Move SELinux policy
        if debian_target? || redhat_target?
-          move "#{install_dir}/etc/datadog-agent/selinux", "/etc/datadog-agent/selinux"
+          move "#{install_dir}/etc/datadog-agent/selinux", "#{output_config_dir}/etc/datadog-agent/selinux"
        end
      end

      if ot_target?
-        move "#{install_dir}/etc/datadog-agent/otel-config.yaml.example", "/etc/datadog-agent"
+        move "#{install_dir}/etc/datadog-agent/otel-config.yaml.example", "#{output_config_dir}/etc/datadog-agent"
      end

      # Create empty directories so that they're owned by the package
      # (also requires `extra_package_file` directive in project def)
-      mkdir "/etc/datadog-agent/checks.d"
+      mkdir "#{output_config_dir}/etc/datadog-agent/checks.d"
      mkdir "/var/log/datadog"

      # remove unused configs
-      delete "/etc/datadog-agent/conf.d/apm.yaml.default"
-      delete "/etc/datadog-agent/conf.d/process_agent.yaml.default"
+      delete "#{output_config_dir}/etc/datadog-agent/conf.d/apm.yaml.default"
+      delete "#{output_config_dir}/etc/datadog-agent/conf.d/process_agent.yaml.default"

      # remove windows specific configs
-      delete "/etc/datadog-agent/conf.d/winproc.d"
+      delete "#{output_config_dir}/etc/datadog-agent/conf.d/winproc.d"

      # cleanup clutter
      delete "#{install_dir}/etc"
@@ -174,6 +179,9 @@
      # Most postgres binaries are removed in postgres' own software
      # recipe, but we need pg_config to build psycopq.
      delete "#{install_dir}/embedded/bin/pg_config"
+
+      # Edit rpath from a true path to relative path for each binary
+      command "inv omnibus.rpath-edit #{install_dir} #{install_dir}", cwd: Dir.pwd
    end

    if osx_target?
@@ -186,6 +194,9 @@
      # remove docker configuration
      delete "#{install_dir}/etc/conf.d/docker.d"

+      # Edit rpath from a true path to relative path for each binary
+      command "inv omnibus.rpath-edit #{install_dir} #{install_dir} --platform=macos", cwd: Dir.pwd
+
      if ENV['HARDENED_RUNTIME_MAC'] == 'true'
        hardened_runtime = "-o runtime --entitlements #{entitlements_file} "
      else
diff --git a/omnibus/config/software/datadog-agent-integrations-py2.rb b/omnibus/config/software/datadog-agent-integrations-py2.rb
index d654bf752b6e0..ee05b6d366b32 100644
--- a/omnibus/config/software/datadog-agent-integrations-py2.rb
+++ b/omnibus/config/software/datadog-agent-integrations-py2.rb
@@ -78,14 +78,15 @@
  }

  # Install dependencies
+  python_version = "py2"
  lockfile_name = case
    when linux_target?
-      arm_target? ? "linux-aarch64_py2.txt" : "linux-x86_64_py2.txt"
+      arm_target? ? "linux-aarch64" : "linux-x86_64"
    when osx_target?
-      "macos-x86_64_py2.txt"
+      "macos-x86_64"
    when windows_target?
-      "windows-x86_64_py2.txt"
-    end
+      "windows-x86_64"
+    end + "_#{python_version}.txt"
  lockfile = windows_safe_path(project_dir, ".deps", "resolved", lockfile_name)

  command "#{python} -m pip install --require-hashes --only-binary=:all: --no-deps -r #{lockfile}"
@@ -131,8 +132,6 @@
  # Retrieving integrations from cache
  cache_bucket = ENV.fetch('INTEGRATION_WHEELS_CACHE_BUCKET', '')
  cache_branch = (shellout! "inv release.get-release-json-value base_branch", cwd: File.expand_path('..', tasks_dir_in)).stdout.strip
-  # On windows, `aws` actually executes Ruby's AWS SDK, but we want the Python one
-  awscli = if windows_target? then '"c:\Program files\python311\scripts\aws"' else 'aws' end
  if cache_bucket != ''
    mkdir cached_wheels_dir
    shellout!
"inv -e agent.get-integrations-from-cache " \ @@ -140,8 +139,7 @@ "--branch #{cache_branch || 'main'} " \ "--integrations-dir #{windows_safe_path(project_dir)} " \ "--target-dir #{cached_wheels_dir} " \ - "--integrations #{checks_to_install.join(',')} " \ - "--awscli #{awscli}", + "--integrations #{checks_to_install.join(',')}", :cwd => tasks_dir_in # install all wheels from cache in one pip invocation to speed things up @@ -218,8 +216,7 @@ "--branch #{cache_branch} " \ "--integrations-dir #{windows_safe_path(project_dir)} " \ "--build-dir #{wheel_build_dir} " \ - "--integration #{check} " \ - "--awscli #{awscli}", + "--integration #{check}", :cwd => tasks_dir_in end end diff --git a/omnibus/config/software/datadog-agent-integrations-py3.rb b/omnibus/config/software/datadog-agent-integrations-py3.rb index 1ceb159dfc1fa..e29c4e3431ab4 100644 --- a/omnibus/config/software/datadog-agent-integrations-py3.rb +++ b/omnibus/config/software/datadog-agent-integrations-py3.rb @@ -13,11 +13,13 @@ dependency 'datadog-agent-integrations-py3-dependencies' +python_version = "3.12" + relative_path 'integrations-core' -whitelist_file "embedded/lib/python3.11/site-packages/.libsaerospike" -whitelist_file "embedded/lib/python3.11/site-packages/aerospike.libs" -whitelist_file "embedded/lib/python3.11/site-packages/psycopg2" -whitelist_file "embedded/lib/python3.11/site-packages/pymqi" +whitelist_file "embedded/lib/python#{python_version}/site-packages/.libsaerospike" +whitelist_file "embedded/lib/python#{python_version}/site-packages/aerospike.libs" +whitelist_file "embedded/lib/python#{python_version}/site-packages/psycopg2" +whitelist_file "embedded/lib/python#{python_version}/site-packages/pymqi" source git: 'https://github.com/DataDog/integrations-core.git' @@ -79,12 +81,12 @@ # Install dependencies lockfile_name = case when linux_target? - arm_target? ? "linux-aarch64_py3.txt" : "linux-x86_64_py3.txt" + arm_target? ? "linux-aarch64" : "linux-x86_64" when osx_target? - "macos-x86_64_py3.txt" + "macos-x86_64" when windows_target? - "windows-x86_64_py3.txt" - end + "windows-x86_64" + end + "_#{python_version}.txt" lockfile = windows_safe_path(project_dir, ".deps", "resolved", lockfile_name) command "#{python} -m pip install --require-hashes --only-binary=:all: --no-deps -r #{lockfile}" @@ -128,12 +130,9 @@ shellout! "inv agent.collect-integrations #{project_dir} 3 #{os} #{excluded_folders.join(',')}", :cwd => tasks_dir_in ).stdout.split() - # Retrieving integrations from cache cache_bucket = ENV.fetch('INTEGRATION_WHEELS_CACHE_BUCKET', '') cache_branch = (shellout! "inv release.get-release-json-value base_branch", cwd: File.expand_path('..', tasks_dir_in)).stdout.strip - # On windows, `aws` actually executes Ruby's AWS SDK, but we want the Python one - awscli = if windows_target? then '"c:\Program files\python311\scripts\aws"' else 'aws' end if cache_bucket != '' mkdir cached_wheels_dir shellout! 
"inv -e agent.get-integrations-from-cache " \ @@ -141,8 +140,7 @@ "--branch #{cache_branch || 'main'} " \ "--integrations-dir #{windows_safe_path(project_dir)} " \ "--target-dir #{cached_wheels_dir} " \ - "--integrations #{checks_to_install.join(',')} " \ - "--awscli #{awscli}", + "--integrations #{checks_to_install.join(',')}", :cwd => tasks_dir_in # install all wheels from cache in one pip invocation to speed things up @@ -217,20 +215,12 @@ "--branch #{cache_branch} " \ "--integrations-dir #{windows_safe_path(project_dir)} " \ "--build-dir #{wheel_build_dir} " \ - "--integration #{check} " \ - "--awscli #{awscli}", + "--integration #{check}", :cwd => tasks_dir_in end end end - # Patch applies to only one file: set it explicitly as a target, no need for -p - if windows_target? - patch :source => "remove-maxfile-maxpath-psutil.patch", :target => "#{python_3_embedded}/Lib/site-packages/psutil/__init__.py" - else - patch :source => "remove-maxfile-maxpath-psutil.patch", :target => "#{install_dir}/embedded/lib/python3.11/site-packages/psutil/__init__.py" - end - # Run pip check to make sure the agent's python environment is clean, all the dependencies are compatible command "#{python} -m pip check" @@ -238,7 +228,7 @@ if windows_target? delete "#{python_3_embedded}/Lib/site-packages/Cryptodome/SelfTest/" else - delete "#{install_dir}/embedded/lib/python3.11/site-packages/Cryptodome/SelfTest/" + delete "#{install_dir}/embedded/lib/python#{python_version}/site-packages/Cryptodome/SelfTest/" end # Ship `requirements-agent-release.txt` file containing the versions of every check shipped with the agent diff --git a/omnibus/config/software/datadog-agent.rb b/omnibus/config/software/datadog-agent.rb index ef24f0b1940ed..1ce6074d704ab 100644 --- a/omnibus/config/software/datadog-agent.rb +++ b/omnibus/config/software/datadog-agent.rb @@ -65,8 +65,7 @@ end # include embedded path (mostly for `pkg-config` binary) - env = with_embedded_path(env) - + env = with_standard_compiler_flags(with_embedded_path(env)) # we assume the go deps are already installed before running omnibus if windows_target? platform = windows_arch_i386? ? "x86" : "x64" @@ -233,6 +232,7 @@ # The file below is touched by software builds that don't put anything in the installation # directory (libgcc right now) so that the git_cache gets updated let's remove it from the # final package + # Change RPATH from the install_dir to relative RPATH unless windows_target? delete "#{install_dir}/uselessfile" end diff --git a/omnibus/config/software/datadog-iot-agent.rb b/omnibus/config/software/datadog-iot-agent.rb index 4fc7aeb3f76db..ac2b4887928bc 100644 --- a/omnibus/config/software/datadog-iot-agent.rb +++ b/omnibus/config/software/datadog-iot-agent.rb @@ -37,6 +37,7 @@ else major_version_arg = "$MAJOR_VERSION" py_runtimes_arg = "$PY_RUNTIMES" + env['CGO_CFLAGS'] = "-I#{install_dir}/embedded/include" end if linux_target? diff --git a/omnibus/config/software/init-scripts-agent.rb b/omnibus/config/software/init-scripts-agent.rb index b425b8bd4f334..7add284c1ba38 100644 --- a/omnibus/config/software/init-scripts-agent.rb +++ b/omnibus/config/software/init-scripts-agent.rb @@ -5,8 +5,9 @@ always_build true build do + output_config_dir = ENV["OUTPUT_CONFIG_DIR"] || "" if linux_target? - etc_dir = "/etc/datadog-agent" + etc_dir = "#{output_config_dir}/etc/datadog-agent" mkdir "/etc/init" if debian_target? 
# sysvinit support for debian only for now diff --git a/omnibus/config/software/installer.rb b/omnibus/config/software/installer.rb index fee51d35e8084..99708622641fb 100644 --- a/omnibus/config/software/installer.rb +++ b/omnibus/config/software/installer.rb @@ -32,7 +32,7 @@ env = with_embedded_path(env) if linux_target? - command "invoke installer.build --rebuild --run-path=/opt/datadog-packages/run --install-path=#{install_dir}", env: env + command "invoke installer.build --rebuild --no-cgo --run-path=/opt/datadog-packages/run --install-path=#{install_dir}", env: env mkdir "#{install_dir}/bin" copy 'bin/installer', "#{install_dir}/bin/" elsif windows_target? diff --git a/omnibus/config/software/libgcrypt.rb b/omnibus/config/software/libgcrypt.rb index 0ea98ed159a51..af37532e33c69 100644 --- a/omnibus/config/software/libgcrypt.rb +++ b/omnibus/config/software/libgcrypt.rb @@ -36,6 +36,9 @@ env["CFLAGS"] = "-I#{install_dir}/embedded/include -O1 -D_FORTIFY_SOURCE=1 -fPIC" + patch source: "0001-disable-tests-building.patch" + command 'autoreconf -vif' + configure_options = [ "--enable-maintainer-mode", ] diff --git a/omnibus/config/software/libxcrypt.rb b/omnibus/config/software/libxcrypt.rb index 921e6c5e71478..d0451bec61dfa 100644 --- a/omnibus/config/software/libxcrypt.rb +++ b/omnibus/config/software/libxcrypt.rb @@ -4,10 +4,10 @@ # Copyright 2022-present Datadog, Inc. name "libxcrypt" -default_version "4.4.28" +default_version "4.4.36" -version "4.4.28" do - source sha256: "9e936811f9fad11dbca33ca19bd97c55c52eb3ca15901f27ade046cc79e69e87" +version "4.4.36" do + source sha256: "e5e1f4caee0a01de2aee26e3138807d6d3ca2b8e67287966d1fefd65e1fd8943" end ship_source_offer true diff --git a/omnibus/config/software/python2.rb b/omnibus/config/software/python2.rb index a1c9c4e8fc059..f35a7275e1cd6 100644 --- a/omnibus/config/software/python2.rb +++ b/omnibus/config/software/python2.rb @@ -55,6 +55,7 @@ patch :source => "avoid-allocating-thunks-in-ctypes.patch" if linux_target? patch :source => "fix-platform-ubuntu.diff" if linux_target? + patch :source => "0001-disable-multiarch.patch" if linux_target? # security patches backported by the debian community # see: http://deb.debian.org/debian/pool/main/p/python2.7/python2.7_2.7.18-6.diff.gz patch :source => "python2.7_2.7.18-cve-2019-20907.diff" unless windows_target? 
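The libgcrypt change above adds a local patch that drops the tests subdirectories from Makefile.am and then runs autoreconf before configure, since editing Makefile.am invalidates the generated Makefile.in. A minimal sketch of that ordering as plain Ruby, outside the omnibus DSL (the run! helper, the LIBGCRYPT_SRC placeholder, and the explicit -p1 level are assumptions for illustration; omnibus's own patch/command steps handle this in the real software definition):

require 'open3'

# Run a shell command inside the source tree and fail loudly on error,
# roughly what the omnibus build steps do for us.
def run!(cmd, chdir:)
  out, status = Open3.capture2e(cmd, chdir: chdir)
  raise "#{cmd} failed:\n#{out}" unless status.success?
  out
end

src_dir = ENV.fetch('LIBGCRYPT_SRC', 'libgcrypt')  # placeholder checkout path
run!('patch -p1 < 0001-disable-tests-building.patch', chdir: src_dir)
run!('autoreconf -vif', chdir: src_dir)  # regenerate autotools output after the Makefile.am edit
run!('./configure --enable-maintainer-mode', chdir: src_dir)
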
diff --git a/omnibus/config/software/python3.rb b/omnibus/config/software/python3.rb index ffad4329c3d09..fec3777a5d1cc 100644 --- a/omnibus/config/software/python3.rb +++ b/omnibus/config/software/python3.rb @@ -1,6 +1,6 @@ name "python3" -default_version "3.11.8" +default_version "3.12.6" if ohai["platform"] != "windows" @@ -15,11 +15,12 @@ dependency "libyaml" source :url => "https://python.org/ftp/python/#{version}/Python-#{version}.tgz", - :sha256 => "d3019a613b9e8761d260d9ebe3bd4df63976de30464e5c0189566e1ae3f61889" + :sha256 => "85a4c1be906d20e5c5a69f2466b00da769c221d6a684acfd3a514dbf5bf10a66" relative_path "Python-#{version}" python_configure_options = [ + "--without-readline", # Disables readline support "--with-ensurepip=yes" # We upgrade pip later, in the pip3 software definition ] @@ -53,7 +54,6 @@ delete "#{install_dir}/embedded/lib/python#{major}.#{minor}/test" block do - FileUtils.rm_f(Dir.glob("#{install_dir}/embedded/lib/python#{major}.#{minor}/lib-dynload/readline.*")) FileUtils.rm_f(Dir.glob("#{install_dir}/embedded/lib/python#{major}.#{minor}/distutils/command/wininst-*.exe")) end end @@ -63,7 +63,7 @@ # note that starting with 3.7.3 on Windows, the zip should be created without the built-in pip source :url => "https://dd-agent-omnibus.s3.amazonaws.com/python-windows-#{version}-amd64.zip", - :sha256 => "8b016ed2f94cfc027fed172cbf1f6043f64519c6e9ad70b4565635192228b2b6".downcase + :sha256 => "045d20a659fe80041b6fd508b77f250b03330347d64f128b392b88e68897f5a0".downcase vcrt140_root = "#{Omnibus::Config.source_dir()}/vc_redist_140/expanded" build do diff --git a/omnibus/config/software/systemd.rb b/omnibus/config/software/systemd.rb new file mode 100644 index 0000000000000..b39b92d77d316 --- /dev/null +++ b/omnibus/config/software/systemd.rb @@ -0,0 +1,43 @@ +# +# Copyright:: Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +name "systemd" +default_version "253" + +license "GPLv2" +license "LGPL-2.1" +license_file "LICENSE.GPL2" +license_file "LICENSE.LGPL2.1" +skip_transitive_dependency_licensing true + +version("253") { source sha256: "acbd86d42ebc2b443722cb469ad215a140f504689c7a9133ecf91b235275a491" } + +ship_source_offer true + +source url: "https://github.com/systemd/systemd/archive/refs/tags/v#{version}.tar.gz" + +relative_path "#{name}-#{version}" + +build do + env = with_standard_compiler_flags(with_embedded_path) + + # We only need the headers for coreos/go-systemd, and building + # libsystemd itself would be fairly complicated as our toolchain doesn't + # default include `/usr/include` in its default include path, while systemd + # definitely need files in /usr/include/sys to build. 
+ mkdir "#{install_dir}/embedded/include/systemd" + copy "src/systemd/*.h", "#{install_dir}/embedded/include/systemd/" +end diff --git a/omnibus/lib/project_extension.rb b/omnibus/lib/project_extension.rb index 50c6ca2b70e20..50def10f56ec5 100644 --- a/omnibus/lib/project_extension.rb +++ b/omnibus/lib/project_extension.rb @@ -41,26 +41,48 @@ def package_me def ddwcssign(file) log.info(self.class.name) { "Signing #{file}" } - cmd = Array.new.tap do |arr| + + # Signing is inherently flaky as the timestamp server may not be available + # retry a few times + max_retries = 3 + attempts = 0 + delay = 2 + + begin + attempts += 1 + cmd = Array.new.tap do |arr| arr << "dd-wcs" arr << "sign" arr << "\"#{file}\"" - end.join(" ") - status = shellout(cmd) - if status.exitstatus != 0 - log.warn(self.class.name) do - <<-EOH.strip - Failed to sign with dd-wcs - - STDOUT - ------ - #{status.stdout} - - STDERR - ------ - #{status.stderr} - EOH + end.join(" ") + + status = shellout(cmd) + if status.exitstatus != 0 + log.warn(self.class.name) do + <<-EOH.strip + Failed to sign with dd-wcs (Attempt #{attempts} of #{max_retries}) + + STDOUT + ------ + #{status.stdout} + + STDERR + ------ + #{status.stderr} + EOH + end + raise "Failed to sign with dd-wcs" + else + log.info(self.class.name) { "Successfully signed #{file} after #{attempts} attempt(s)" } + end + rescue => e + # Retry logic: raise error after 3 attempts + if attempts < max_retries + log.info(self.class.name) { "Retrying signing #{file} (Attempt #{attempts + 1})" } + sleep(delay) + retry end + raise "Failed to sign with dd-wcs: #{e.message}" end end @@ -78,4 +100,4 @@ class Project expose :inspect_binary expose :sign_file end -end \ No newline at end of file +end diff --git a/omnibus/package-scripts/agent-rpm/posttrans b/omnibus/package-scripts/agent-rpm/posttrans index 06ac058fd1ac4..a560a4626b45d 100755 --- a/omnibus/package-scripts/agent-rpm/posttrans +++ b/omnibus/package-scripts/agent-rpm/posttrans @@ -10,7 +10,7 @@ INSTALL_DIR=/opt/datadog-agent CONFIG_DIR=/etc/datadog-agent SERVICE_NAME=datadog-agent -KNOWN_DISTRIBUTION="(Debian|Ubuntu|RedHat|CentOS|openSUSE|Amazon|Arista|SUSE|Rocky|AlmaLinux)" +KNOWN_DISTRIBUTION="(Debian|Ubuntu|RedHat|CentOS|openSUSE|Amazon|Arista|SUSE|Rocky|AlmaLinux|Oracle)" DISTRIBUTION=$(lsb_release -d 2>/dev/null | grep -Eo $KNOWN_DISTRIBUTION || grep -Eo $KNOWN_DISTRIBUTION /etc/issue 2>/dev/null || grep -Eo $KNOWN_DISTRIBUTION /etc/Eos-release 2>/dev/null || grep -m1 -Eo $KNOWN_DISTRIBUTION /etc/os-release 2>/dev/null || uname -s) # Create a symlink to the agent's binary @@ -28,8 +28,8 @@ fi # Only install selinux policy on a few known distributions INSTALL_SELINUX_POLICY="false" -if [ "$DISTRIBUTION" = "CentOS" ] || [ "$DISTRIBUTION" = "RedHat" ] || [ "$DISTRIBUTION" = "Rocky" ] || [ "$DISTRIBUTION" = "AlmaLinux" ]; then - OS_RELEASE_VERSION=$(grep VERSION_ID /etc/os-release | cut -d = -f 2 | tr -d '"') +if [ "$DISTRIBUTION" = "CentOS" ] || [ "$DISTRIBUTION" = "RedHat" ] || [ "$DISTRIBUTION" = "Rocky" ] || [ "$DISTRIBUTION" = "AlmaLinux" ] || [ "$DISTRIBUTION" = "Oracle" ]; then + OS_RELEASE_VERSION=$(grep VERSION_ID /etc/os-release | cut -d = -f 2 | cut -d . 
-f 1 | tr -d '"') if [ "$OS_RELEASE_VERSION" = "7" ]; then INSTALL_SELINUX_POLICY="true" fi diff --git a/pkg/aggregator/aggregator.go b/pkg/aggregator/aggregator.go index 41fbeb1f93ad4..f8cac0884df2d 100644 --- a/pkg/aggregator/aggregator.go +++ b/pkg/aggregator/aggregator.go @@ -18,7 +18,8 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" @@ -258,7 +259,7 @@ type FlushAndSerializeInParallel struct { } // NewFlushAndSerializeInParallel creates a new instance of FlushAndSerializeInParallel. -func NewFlushAndSerializeInParallel(config config.Config) FlushAndSerializeInParallel { +func NewFlushAndSerializeInParallel(config model.Config) FlushAndSerializeInParallel { return FlushAndSerializeInParallel{ BufferSize: config.GetInt("aggregator_flush_metrics_and_serialize_in_parallel_buffer_size"), ChannelSize: config.GetInt("aggregator_flush_metrics_and_serialize_in_parallel_chan_size"), @@ -267,21 +268,21 @@ func NewFlushAndSerializeInParallel(config config.Config) FlushAndSerializeInPar // NewBufferedAggregator instantiates a BufferedAggregator func NewBufferedAggregator(s serializer.MetricSerializer, eventPlatformForwarder eventplatform.Component, hostname string, flushInterval time.Duration) *BufferedAggregator { - bufferSize := config.Datadog().GetInt("aggregator_buffer_size") + bufferSize := pkgconfigsetup.Datadog().GetInt("aggregator_buffer_size") agentName := flavor.GetFlavor() - if agentName == flavor.IotAgent && !config.Datadog().GetBool("iot_host") { + if agentName == flavor.IotAgent && !pkgconfigsetup.Datadog().GetBool("iot_host") { agentName = flavor.DefaultAgent - } else if config.Datadog().GetBool("iot_host") { + } else if pkgconfigsetup.Datadog().GetBool("iot_host") { // Override the agentName if this Agent is configured to report as IotAgent agentName = flavor.IotAgent } - if config.Datadog().GetBool("heroku_dyno") { + if pkgconfigsetup.Datadog().GetBool("heroku_dyno") { // Override the agentName if this Agent is configured to report as Heroku Dyno agentName = flavor.HerokuAgent } - if config.Datadog().GetBool("djm_config.enabled") { + if pkgconfigsetup.Datadog().GetBool("djm_config.enabled") { AddRecurrentSeries(&metrics.Serie{ Name: "datadog.djm.agent_host", Points: []metrics.Point{{Value: 1.0}}, @@ -289,7 +290,7 @@ func NewBufferedAggregator(s serializer.MetricSerializer, eventPlatformForwarder }) } - tagsStore := tags.NewStore(config.Datadog().GetBool("aggregator_use_tags_store"), "aggregator") + tagsStore := tags.NewStore(pkgconfigsetup.Datadog().GetBool("aggregator_use_tags_store"), "aggregator") aggregator := &BufferedAggregator{ bufferedServiceCheckIn: make(chan []*servicecheck.ServiceCheck, bufferSize), @@ -316,10 +317,10 @@ func NewBufferedAggregator(s serializer.MetricSerializer, eventPlatformForwarder stopChan: make(chan struct{}), health: health.RegisterLiveness("aggregator"), agentName: agentName, - tlmContainerTagsEnabled: config.Datadog().GetBool("basic_telemetry_add_container_tags"), + tlmContainerTagsEnabled: pkgconfigsetup.Datadog().GetBool("basic_telemetry_add_container_tags"), agentTags: 
tagger.AgentTags, globalTags: tagger.GlobalTags, - flushAndSerializeInParallel: NewFlushAndSerializeInParallel(config.Datadog()), + flushAndSerializeInParallel: NewFlushAndSerializeInParallel(pkgconfigsetup.Datadog()), } return aggregator @@ -642,7 +643,7 @@ func (agg *BufferedAggregator) flushServiceChecks(start time.Time, waitForSerial addFlushCount("ServiceChecks", int64(len(serviceChecks))) // For debug purposes print out all serviceCheck/tag combinations - if config.Datadog().GetBool("log_payloads") { + if pkgconfigsetup.Datadog().GetBool("log_payloads") { log.Debug("Flushing the following Service Checks:") for _, sc := range serviceChecks { log.Debugf("%s", sc) @@ -699,7 +700,7 @@ func (agg *BufferedAggregator) flushEvents(start time.Time, waitForSerializer bo addFlushCount("Events", int64(len(events))) // For debug purposes print out all Event/tag combinations - if config.Datadog().GetBool("log_payloads") { + if pkgconfigsetup.Datadog().GetBool("log_payloads") { log.Debug("Flushing the following Events:") for _, event := range events { log.Debugf("%s", event) @@ -932,10 +933,10 @@ func (agg *BufferedAggregator) handleRegisterSampler(id checkid.ID) { return } agg.checkSamplers[id] = newCheckSampler( - config.Datadog().GetInt("check_sampler_bucket_commits_count_expiry"), - config.Datadog().GetBool("check_sampler_expire_metrics"), - config.Datadog().GetBool("check_sampler_context_metrics"), - config.Datadog().GetDuration("check_sampler_stateful_metric_expiration_time"), + pkgconfigsetup.Datadog().GetInt("check_sampler_bucket_commits_count_expiry"), + pkgconfigsetup.Datadog().GetBool("check_sampler_expire_metrics"), + pkgconfigsetup.Datadog().GetBool("check_sampler_context_metrics"), + pkgconfigsetup.Datadog().GetDuration("check_sampler_stateful_metric_expiration_time"), agg.tagsStore, id, ) diff --git a/pkg/aggregator/aggregator_test.go b/pkg/aggregator/aggregator_test.go index 74a4fb14ffa79..cf577d0f8397a 100644 --- a/pkg/aggregator/aggregator_test.go +++ b/pkg/aggregator/aggregator_test.go @@ -28,7 +28,7 @@ import ( orchestratorforwarder "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator" "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" @@ -575,8 +575,8 @@ func TestTags(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - defer pkgconfig.Datadog().SetWithoutSource("basic_telemetry_add_container_tags", nil) - pkgconfig.Datadog().SetWithoutSource("basic_telemetry_add_container_tags", tt.tlmContainerTagsEnabled) + defer pkgconfigsetup.Datadog().SetWithoutSource("basic_telemetry_add_container_tags", nil) + pkgconfigsetup.Datadog().SetWithoutSource("basic_telemetry_add_container_tags", tt.tlmContainerTagsEnabled) agg := NewBufferedAggregator(nil, nil, tt.hostname, time.Second) agg.agentTags = tt.agentTags agg.globalTags = tt.globalTags @@ -586,9 +586,9 @@ func TestTags(t *testing.T) { } func TestTimeSamplerFlush(t *testing.T) { - pc := pkgconfig.Datadog().GetInt("dogstatsd_pipeline_count") - pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_count", 1) - defer pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_count", pc) + pc := 
pkgconfigsetup.Datadog().GetInt("dogstatsd_pipeline_count") + pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_count", 1) + defer pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_count", pc) s := &MockSerializerIterableSerie{} s.On("AreSeriesEnabled").Return(true) @@ -607,9 +607,9 @@ func TestAddDJMRecurrentSeries(t *testing.T) { // this test IS USING globals (recurrentSeries) // - - djmEnabled := pkgconfig.Datadog().GetBool("djm_config.enabled") - pkgconfig.Datadog().SetWithoutSource("djm_config.enabled", true) - defer pkgconfig.Datadog().SetWithoutSource("djm_config.enabled", djmEnabled) + djmEnabled := pkgconfigsetup.Datadog().GetBool("djm_config.enabled") + pkgconfigsetup.Datadog().SetWithoutSource("djm_config.enabled", true) + defer pkgconfigsetup.Datadog().SetWithoutSource("djm_config.enabled", djmEnabled) s := &MockSerializerIterableSerie{} // NewBufferedAggregator with DJM enable will create a new recurrentSeries diff --git a/pkg/aggregator/check_sampler.go b/pkg/aggregator/check_sampler.go index 729ee6153d341..0efa8238da958 100644 --- a/pkg/aggregator/check_sampler.go +++ b/pkg/aggregator/check_sampler.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/ckey" "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -54,7 +54,7 @@ func (cs *CheckSampler) addSample(metricSample *metrics.MetricSample) { return } - if err := cs.metrics.AddSample(contextKey, metricSample, metricSample.Timestamp, 1, config.Datadog()); err != nil { + if err := cs.metrics.AddSample(contextKey, metricSample, metricSample.Timestamp, 1, pkgconfigsetup.Datadog()); err != nil { log.Debugf("Ignoring sample '%s' on host '%s' and tags '%s': %s", metricSample.Name, metricSample.Host, metricSample.Tags, err) } } diff --git a/pkg/aggregator/check_sampler_bench_test.go b/pkg/aggregator/check_sampler_bench_test.go index 71a32170b8575..4c731fd090390 100644 --- a/pkg/aggregator/check_sampler_bench_test.go +++ b/pkg/aggregator/check_sampler_bench_test.go @@ -24,7 +24,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/ckey" "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -43,10 +43,10 @@ func benchmarkAddBucket(bucketValue int64, b *testing.B) { // For some reasons using InitAggregator[WithInterval] doesn't fix the problem, // but this do. 
deps := fxutil.Test[benchmarkDeps](b, core.MockBundle()) - forwarderOpts := forwarder.NewOptionsWithResolvers(config.Datadog(), deps.Log, resolver.NewSingleDomainResolvers(map[string][]string{"hello": {"world"}})) + forwarderOpts := forwarder.NewOptionsWithResolvers(pkgconfigsetup.Datadog(), deps.Log, resolver.NewSingleDomainResolvers(map[string][]string{"hello": {"world"}})) options := DefaultAgentDemultiplexerOptions() options.DontStartForwarders = true - sharedForwarder := forwarder.NewDefaultForwarder(config.Datadog(), deps.Log, forwarderOpts) + sharedForwarder := forwarder.NewDefaultForwarder(pkgconfigsetup.Datadog(), deps.Log, forwarderOpts) orchestratorForwarder := optional.NewOption[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) eventPlatformForwarder := optional.NewOptionPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(deps.Hostname)) demux := InitAndStartAgentDemultiplexer(deps.Log, sharedForwarder, &orchestratorForwarder, options, eventPlatformForwarder, deps.Compressor, "hostname") diff --git a/pkg/aggregator/demultiplexer.go b/pkg/aggregator/demultiplexer.go index 1896c60711127..5a9b76c2b5811 100644 --- a/pkg/aggregator/demultiplexer.go +++ b/pkg/aggregator/demultiplexer.go @@ -8,7 +8,7 @@ package aggregator import ( "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" @@ -146,8 +146,8 @@ func GetDogStatsDWorkerAndPipelineCount() (int, int) { func getDogStatsDWorkerAndPipelineCount(vCPUs int) (int, int) { var dsdWorkerCount int var pipelineCount int - autoAdjust := config.Datadog().GetBool("dogstatsd_pipeline_autoadjust") - autoAdjustStrategy := config.Datadog().GetString("dogstatsd_pipeline_autoadjust_strategy") + autoAdjust := pkgconfigsetup.Datadog().GetBool("dogstatsd_pipeline_autoadjust") + autoAdjustStrategy := pkgconfigsetup.Datadog().GetString("dogstatsd_pipeline_autoadjust_strategy") if autoAdjustStrategy != AutoAdjustStrategyMaxThroughput && autoAdjustStrategy != AutoAdjustStrategyPerOrigin { log.Warnf("Invalid value for 'dogstatsd_pipeline_autoadjust_strategy', using default value: %s", AutoAdjustStrategyMaxThroughput) @@ -160,7 +160,7 @@ func getDogStatsDWorkerAndPipelineCount(vCPUs int) (int, int) { // ------------------------------------ if !autoAdjust { - pipelineCount = config.Datadog().GetInt("dogstatsd_pipeline_count") + pipelineCount = pkgconfigsetup.Datadog().GetInt("dogstatsd_pipeline_count") if pipelineCount <= 0 { // guard against configuration mistakes pipelineCount = 1 } @@ -199,7 +199,7 @@ func getDogStatsDWorkerAndPipelineCount(vCPUs int) (int, int) { pipelineCount = 1 } - if config.Datadog().GetInt("dogstatsd_pipeline_count") > 1 { + if pkgconfigsetup.Datadog().GetInt("dogstatsd_pipeline_count") > 1 { log.Warn("DogStatsD pipeline count value ignored since 'dogstatsd_pipeline_autoadjust' is enabled.") } } else if autoAdjustStrategy == AutoAdjustStrategyPerOrigin { @@ -216,7 +216,7 @@ func getDogStatsDWorkerAndPipelineCount(vCPUs int) (int, int) { dsdWorkerCount = 2 } - pipelineCount = config.Datadog().GetInt("dogstatsd_pipeline_count") + pipelineCount = pkgconfigsetup.Datadog().GetInt("dogstatsd_pipeline_count") if pipelineCount <= 0 { // guard against configuration mistakes pipelineCount = vCPUs * 2 } diff --git a/pkg/aggregator/demultiplexer_agent.go b/pkg/aggregator/demultiplexer_agent.go index fb077e54df6b3..fcd44e572e45c 100644 --- 
a/pkg/aggregator/demultiplexer_agent.go +++ b/pkg/aggregator/demultiplexer_agent.go @@ -20,8 +20,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" @@ -139,15 +139,15 @@ func initAgentDemultiplexer( hostname string) *AgentDemultiplexer { // prepare the multiple forwarders // ------------------------------- - if config.Datadog().GetBool("telemetry.enabled") && config.Datadog().GetBool("telemetry.dogstatsd_origin") && !config.Datadog().GetBool("aggregator_use_tags_store") { + if pkgconfigsetup.Datadog().GetBool("telemetry.enabled") && pkgconfigsetup.Datadog().GetBool("telemetry.dogstatsd_origin") && !pkgconfigsetup.Datadog().GetBool("aggregator_use_tags_store") { log.Warn("DogStatsD origin telemetry is not supported when aggregator_use_tags_store is disabled.") - config.Datadog().Set("telemetry.dogstatsd_origin", false, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("telemetry.dogstatsd_origin", false, model.SourceAgentRuntime) } // prepare the serializer // ---------------------- - sharedSerializer := serializer.NewSerializer(sharedForwarder, orchestratorForwarder, compressor, config.Datadog(), hostname) + sharedSerializer := serializer.NewSerializer(sharedForwarder, orchestratorForwarder, compressor, pkgconfigsetup.Datadog(), hostname) // prepare the embedded aggregator // -- @@ -157,8 +157,8 @@ func initAgentDemultiplexer( // statsd samplers // --------------- - bufferSize := config.Datadog().GetInt("aggregator_buffer_size") - metricSamplePool := metrics.NewMetricSamplePool(MetricSamplePoolBatchSize, utils.IsTelemetryEnabled(config.Datadog())) + bufferSize := pkgconfigsetup.Datadog().GetInt("aggregator_buffer_size") + metricSamplePool := metrics.NewMetricSamplePool(MetricSamplePoolBatchSize, utils.IsTelemetryEnabled(pkgconfigsetup.Datadog())) _, statsdPipelinesCount := GetDogStatsDWorkerAndPipelineCount() log.Debug("the Demultiplexer will use", statsdPipelinesCount, "pipelines") @@ -167,7 +167,7 @@ func initAgentDemultiplexer( for i := 0; i < statsdPipelinesCount; i++ { // the sampler - tagsStore := tags.NewStore(config.Datadog().GetBool("aggregator_use_tags_store"), fmt.Sprintf("timesampler #%d", i)) + tagsStore := tags.NewStore(pkgconfigsetup.Datadog().GetBool("aggregator_use_tags_store"), fmt.Sprintf("timesampler #%d", i)) statsdSampler := NewTimeSampler(TimeSamplerID(i), bucketSize, tagsStore, agg.hostname) @@ -180,9 +180,9 @@ func initAgentDemultiplexer( var noAggWorker *noAggregationStreamWorker var noAggSerializer serializer.MetricSerializer if options.EnableNoAggregationPipeline { - noAggSerializer = serializer.NewSerializer(sharedForwarder, orchestratorForwarder, compressor, config.Datadog(), hostname) + noAggSerializer = serializer.NewSerializer(sharedForwarder, orchestratorForwarder, compressor, pkgconfigsetup.Datadog(), hostname) noAggWorker = newNoAggregationStreamWorker( - config.Datadog().GetInt("dogstatsd_no_aggregation_pipeline_batch_size"), + pkgconfigsetup.Datadog().GetInt("dogstatsd_no_aggregation_pipeline_batch_size"), metricSamplePool, noAggSerializer, agg.flushAndSerializeInParallel, @@ -312,7 
+312,7 @@ func (d *AgentDemultiplexer) flushLoop() { // Stop stops the demultiplexer. // Resources are released, the instance should not be used after a call to `Stop()`. func (d *AgentDemultiplexer) Stop(flush bool) { - timeout := config.Datadog().GetDuration("aggregator_stop_timeout") * time.Second + timeout := pkgconfigsetup.Datadog().GetDuration("aggregator_stop_timeout") * time.Second if d.noAggStreamWorker != nil { d.noAggStreamWorker.stop(flush) @@ -399,7 +399,7 @@ func (d *AgentDemultiplexer) flushToSerializer(start time.Time, waitForSerialize return } - logPayloads := config.Datadog().GetBool("log_payloads") + logPayloads := pkgconfigsetup.Datadog().GetBool("log_payloads") series, sketches := createIterableMetrics(d.aggregator.flushAndSerializeInParallel, d.sharedSerializer, logPayloads, false) metrics.Serialize( diff --git a/pkg/aggregator/demultiplexer_serverless.go b/pkg/aggregator/demultiplexer_serverless.go index d76d89309d5c4..97573531f5a9d 100644 --- a/pkg/aggregator/demultiplexer_serverless.go +++ b/pkg/aggregator/demultiplexer_serverless.go @@ -15,7 +15,7 @@ import ( forwarder "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/serializer" @@ -42,16 +42,16 @@ type ServerlessDemultiplexer struct { // InitAndStartServerlessDemultiplexer creates and starts new Demultiplexer for the serverless agent. func InitAndStartServerlessDemultiplexer(keysPerDomain map[string][]string, forwarderTimeout time.Duration) *ServerlessDemultiplexer { - bufferSize := config.Datadog().GetInt("aggregator_buffer_size") + bufferSize := pkgconfigsetup.Datadog().GetInt("aggregator_buffer_size") logger := logimpl.NewTemporaryLoggerWithoutInit() - forwarder := forwarder.NewSyncForwarder(config.Datadog(), logger, keysPerDomain, forwarderTimeout) + forwarder := forwarder.NewSyncForwarder(pkgconfigsetup.Datadog(), logger, keysPerDomain, forwarderTimeout) h, _ := hostname.Get(context.Background()) - serializer := serializer.NewSerializer(forwarder, nil, compressionimpl.NewCompressor(config.Datadog()), config.Datadog(), h) - metricSamplePool := metrics.NewMetricSamplePool(MetricSamplePoolBatchSize, utils.IsTelemetryEnabled(config.Datadog())) - tagsStore := tags.NewStore(config.Datadog().GetBool("aggregator_use_tags_store"), "timesampler") + serializer := serializer.NewSerializer(forwarder, nil, compressionimpl.NewCompressor(pkgconfigsetup.Datadog()), pkgconfigsetup.Datadog(), h) + metricSamplePool := metrics.NewMetricSamplePool(MetricSamplePoolBatchSize, utils.IsTelemetryEnabled(pkgconfigsetup.Datadog())) + tagsStore := tags.NewStore(pkgconfigsetup.Datadog().GetBool("aggregator_use_tags_store"), "timesampler") statsdSampler := NewTimeSampler(TimeSamplerID(0), bucketSize, tagsStore, "") - flushAndSerializeInParallel := NewFlushAndSerializeInParallel(config.Datadog()) + flushAndSerializeInParallel := NewFlushAndSerializeInParallel(pkgconfigsetup.Datadog()) statsdWorker := newTimeSamplerWorker(statsdSampler, DefaultFlushInterval, bufferSize, metricSamplePool, flushAndSerializeInParallel, tagsStore) demux := &ServerlessDemultiplexer{ @@ -104,7 +104,7 @@ func (d *ServerlessDemultiplexer) 
ForceFlushToSerializer(start time.Time, waitFo d.flushLock.Lock() defer d.flushLock.Unlock() - logPayloads := config.Datadog().GetBool("log_payloads") + logPayloads := pkgconfigsetup.Datadog().GetBool("log_payloads") series, sketches := createIterableMetrics(d.flushAndSerializeInParallel, d.serializer, logPayloads, true) metrics.Serialize( diff --git a/pkg/aggregator/demultiplexer_test.go b/pkg/aggregator/demultiplexer_test.go index efd4ebccebd14..244565ddfd428 100644 --- a/pkg/aggregator/demultiplexer_test.go +++ b/pkg/aggregator/demultiplexer_test.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/comp/serializer/compression" "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/stretchr/testify/assert" @@ -96,17 +96,17 @@ func TestDemuxForwardersCreated(t *testing.T) { // now, simulate a cluster-agent environment and enabled the orchestrator feature - oee := pkgconfig.Datadog().Get("orchestrator_explorer.enabled") - cre := pkgconfig.Datadog().Get("clc_runner_enabled") - ecp := pkgconfig.Datadog().Get("extra_config_providers") + oee := pkgconfigsetup.Datadog().Get("orchestrator_explorer.enabled") + cre := pkgconfigsetup.Datadog().Get("clc_runner_enabled") + ecp := pkgconfigsetup.Datadog().Get("extra_config_providers") defer func() { - pkgconfig.Datadog().SetWithoutSource("orchestrator_explorer.enabled", oee) - pkgconfig.Datadog().SetWithoutSource("clc_runner_enabled", cre) - pkgconfig.Datadog().SetWithoutSource("extra_config_providers", ecp) + pkgconfigsetup.Datadog().SetWithoutSource("orchestrator_explorer.enabled", oee) + pkgconfigsetup.Datadog().SetWithoutSource("clc_runner_enabled", cre) + pkgconfigsetup.Datadog().SetWithoutSource("extra_config_providers", ecp) }() - pkgconfig.Datadog().SetWithoutSource("orchestrator_explorer.enabled", true) - pkgconfig.Datadog().SetWithoutSource("clc_runner_enabled", true) - pkgconfig.Datadog().SetWithoutSource("extra_config_providers", []string{"clusterchecks"}) + pkgconfigsetup.Datadog().SetWithoutSource("orchestrator_explorer.enabled", true) + pkgconfigsetup.Datadog().SetWithoutSource("clc_runner_enabled", true) + pkgconfigsetup.Datadog().SetWithoutSource("extra_config_providers", []string{"clusterchecks"}) // since we're running the tests with -tags orchestrator and we've enabled the // needed feature above, we should have an orchestrator forwarder instantiated now @@ -203,18 +203,18 @@ func TestDemuxFlushAggregatorToSerializer(t *testing.T) { } func TestGetDogStatsDWorkerAndPipelineCount(t *testing.T) { - pc := pkgconfig.Datadog().GetInt("dogstatsd_pipeline_count") - aa := pkgconfig.Datadog().GetInt("dogstatsd_pipeline_autoadjust") + pc := pkgconfigsetup.Datadog().GetInt("dogstatsd_pipeline_count") + aa := pkgconfigsetup.Datadog().GetInt("dogstatsd_pipeline_autoadjust") defer func() { - pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_count", pc) - pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_autoadjust", aa) + pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_count", pc) + pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_autoadjust", aa) }() assert := assert.New(t) // auto-adjust - pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_autoadjust", true) + 
pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_autoadjust", true) dsdWorkers, pipelines := getDogStatsDWorkerAndPipelineCount(16) assert.Equal(8, dsdWorkers) @@ -234,8 +234,8 @@ func TestGetDogStatsDWorkerAndPipelineCount(t *testing.T) { // no auto-adjust - pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_autoadjust", false) - pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_count", pc) // default value + pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_autoadjust", false) + pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_count", pc) // default value dsdWorkers, pipelines = getDogStatsDWorkerAndPipelineCount(16) assert.Equal(14, dsdWorkers) @@ -255,8 +255,8 @@ func TestGetDogStatsDWorkerAndPipelineCount(t *testing.T) { // no auto-adjust + pipeline count - pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_autoadjust", false) - pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_count", 4) + pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_autoadjust", false) + pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_count", 4) dsdWorkers, pipelines = getDogStatsDWorkerAndPipelineCount(16) assert.Equal(11, dsdWorkers) @@ -290,8 +290,7 @@ func createDemuxDepsWithOrchestratorFwd( modules := fx.Options( defaultforwarder.MockModule(), core.MockBundle(), - orchestratorForwarderImpl.Module(), - fx.Supply(orchestratorParams), + orchestratorForwarderImpl.Module(orchestratorParams), eventplatformimpl.Module(eventPlatformParams), eventplatformreceiverimpl.Module(), compressionimpl.MockModule(), diff --git a/pkg/aggregator/main_test.go b/pkg/aggregator/main_test.go index d13f64c63786f..0befde08183ee 100644 --- a/pkg/aggregator/main_test.go +++ b/pkg/aggregator/main_test.go @@ -11,11 +11,11 @@ import ( "os" "testing" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestMain(m *testing.M) { - checker := config.NewChangeChecker() + checker := pkgconfigsetup.NewChangeChecker() exit := m.Run() if checker.HasChanged() { os.Exit(1) diff --git a/pkg/aggregator/mocksender/mocksender.go b/pkg/aggregator/mocksender/mocksender.go index 41e7514787d3d..85420860a4672 100644 --- a/pkg/aggregator/mocksender/mocksender.go +++ b/pkg/aggregator/mocksender/mocksender.go @@ -24,7 +24,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -40,7 +40,7 @@ func CreateDefaultDemultiplexer() *aggregator.AgentDemultiplexer { opts.FlushInterval = 1 * time.Hour opts.DontStartForwarders = true log := logimpl.NewTemporaryLoggerWithoutInit() - sharedForwarder := forwarder.NewDefaultForwarder(config.Datadog(), log, forwarder.NewOptions(config.Datadog(), log, nil)) + sharedForwarder := forwarder.NewDefaultForwarder(pkgconfigsetup.Datadog(), log, forwarder.NewOptions(pkgconfigsetup.Datadog(), log, nil)) orchestratorForwarder := optional.NewOption[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) eventPlatformForwarder := optional.NewOptionPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(hostnameimpl.NewHostnameService())) return aggregator.InitAndStartAgentDemultiplexer(log, sharedForwarder, &orchestratorForwarder, opts, 
eventPlatformForwarder, compressionimpl.NewMockCompressor(), "") diff --git a/pkg/aggregator/no_aggregation_stream_worker.go b/pkg/aggregator/no_aggregation_stream_worker.go index 530b2fc58799c..38324f7d7dd01 100644 --- a/pkg/aggregator/no_aggregation_stream_worker.go +++ b/pkg/aggregator/no_aggregation_stream_worker.go @@ -11,7 +11,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/pkg/aggregator/internal/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/tagset" @@ -93,7 +93,7 @@ func newNoAggregationStreamWorker(maxMetricsPerPayload int, _ *metrics.MetricSam metricBuffer: tagset.NewHashlessTagsAccumulator(), stopChan: make(chan trigger), - samplesChan: make(chan metrics.MetricSampleBatch, config.Datadog().GetInt("dogstatsd_queue_size")), + samplesChan: make(chan metrics.MetricSampleBatch, pkgconfigsetup.Datadog().GetInt("dogstatsd_queue_size")), // warning for the unsupported metric types should appear maximum 200 times // every 5 minutes. @@ -144,7 +144,7 @@ func (w *noAggregationStreamWorker) run() { ticker := time.NewTicker(noAggWorkerStreamCheckFrequency) defer ticker.Stop() - logPayloads := config.Datadog().GetBool("log_payloads") + logPayloads := pkgconfigsetup.Datadog().GetBool("log_payloads") w.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false) stopped := false diff --git a/pkg/aggregator/test_common.go b/pkg/aggregator/test_common.go index c7bfba146a245..53e634a14cba5 100644 --- a/pkg/aggregator/test_common.go +++ b/pkg/aggregator/test_common.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // PeekSender returns a Sender with passed ID or an error if the sender is not registered @@ -34,5 +34,5 @@ func (d *AgentDemultiplexer) PeekSender(cid checkid.ID) (sender.Sender, error) { //nolint:revive // TODO(AML) Fix revive linter func NewForwarderTest(log log.Component) defaultforwarder.Forwarder { - return defaultforwarder.NewDefaultForwarder(config.Datadog(), log, defaultforwarder.NewOptions(config.Datadog(), log, nil)) + return defaultforwarder.NewDefaultForwarder(pkgconfigsetup.Datadog(), log, defaultforwarder.NewOptions(pkgconfigsetup.Datadog(), log, nil)) } diff --git a/pkg/aggregator/time_sampler.go b/pkg/aggregator/time_sampler.go index 30f41aec5726f..08a4c17eda40b 100644 --- a/pkg/aggregator/time_sampler.go +++ b/pkg/aggregator/time_sampler.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/ckey" "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -52,8 +52,8 @@ func NewTimeSampler(id TimeSamplerID, interval int64, cache *tags.Store, hostnam idString := strconv.Itoa(int(id)) log.Infof("Creating TimeSampler #%s", idString) - contextExpireTime := config.Datadog().GetInt64("dogstatsd_context_expiry_seconds") - counterExpireTime := 
contextExpireTime + config.Datadog().GetInt64("dogstatsd_expiry_seconds") + contextExpireTime := pkgconfigsetup.Datadog().GetInt64("dogstatsd_context_expiry_seconds") + counterExpireTime := contextExpireTime + pkgconfigsetup.Datadog().GetInt64("dogstatsd_expiry_seconds") s := &TimeSampler{ interval: interval, @@ -97,7 +97,7 @@ func (s *TimeSampler) sample(metricSample *metrics.MetricSample, timestamp float s.metricsByTimestamp[bucketStart] = bucketMetrics } // Add sample to bucket - if err := bucketMetrics.AddSample(contextKey, metricSample, timestamp, s.interval, nil, config.Datadog()); err != nil { + if err := bucketMetrics.AddSample(contextKey, metricSample, timestamp, s.interval, nil, pkgconfigsetup.Datadog()); err != nil { log.Debugf("TimeSampler #%d Ignoring sample '%s' on host '%s' and tags '%s': %s", s.id, metricSample.Name, metricSample.Host, metricSample.Tags, err) } } @@ -264,7 +264,7 @@ func (s *TimeSampler) flushContextMetrics(contextMetricsFlusher *metrics.Context } func (s *TimeSampler) countersSampleZeroValue(timestamp int64, contextMetrics metrics.ContextMetrics) { - expirySeconds := config.Datadog().GetInt64("dogstatsd_expiry_seconds") + expirySeconds := pkgconfigsetup.Datadog().GetInt64("dogstatsd_expiry_seconds") for counterContext, entry := range s.contextResolver.resolver.contextsByKey { if entry.lastSeen+expirySeconds > timestamp && entry.context.mtype == metrics.CounterType { sample := &metrics.MetricSample{ @@ -279,13 +279,13 @@ func (s *TimeSampler) countersSampleZeroValue(timestamp int64, contextMetrics me } // Add a zero value sample to the counter // It is ok to add a 0 sample to a counter that was already sampled in the bucket, it won't change its value - contextMetrics.AddSample(counterContext, sample, float64(timestamp), s.interval, nil, config.Datadog()) //nolint:errcheck + contextMetrics.AddSample(counterContext, sample, float64(timestamp), s.interval, nil, pkgconfigsetup.Datadog()) //nolint:errcheck } } } func (s *TimeSampler) sendTelemetry(timestamp float64, series metrics.SerieSink) { - if !config.Datadog().GetBool("telemetry.enabled") { + if !pkgconfigsetup.Datadog().GetBool("telemetry.enabled") { return } @@ -296,7 +296,7 @@ func (s *TimeSampler) sendTelemetry(timestamp float64, series metrics.SerieSink) fmt.Sprintf("sampler_id:%d", s.id), } - if config.Datadog().GetBool("telemetry.dogstatsd_origin") { + if pkgconfigsetup.Datadog().GetBool("telemetry.dogstatsd_origin") { s.contextResolver.sendOriginTelemetry(timestamp, series, s.hostname, tags) } } diff --git a/pkg/api/go.mod b/pkg/api/go.mod index a1dc040010311..636c38023909b 100644 --- a/pkg/api/go.mod +++ b/pkg/api/go.mod @@ -16,7 +16,9 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../config/utils github.com/DataDog/datadog-agent/pkg/telemetry => ../telemetry github.com/DataDog/datadog-agent/pkg/util/executable => ../util/executable @@ -52,7 +54,9 @@ require ( github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect 
github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect @@ -81,7 +85,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect @@ -94,12 +98,12 @@ require ( go.uber.org/fx v1.22.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/api/go.sum b/pkg/api/go.sum index c0f06ba5f32fa..b8178bc54e8ad 100644 --- a/pkg/api/go.sum +++ b/pkg/api/go.sum @@ -182,8 +182,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -255,15 +256,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= 
+golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -299,8 +300,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -316,8 +317,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/pkg/cli/subcommands/check/command.go b/pkg/cli/subcommands/check/command.go index f971da2048b6c..9e33f24d63548 100644 --- a/pkg/cli/subcommands/check/command.go +++ b/pkg/cli/subcommands/check/command.go @@ -75,8 +75,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check/stats" 
"github.com/DataDog/datadog-agent/pkg/collector/python" "github.com/DataDog/datadog-agent/pkg/commonchecks" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/serializer" statuscollector "github.com/DataDog/datadog-agent/pkg/status/collector" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -183,18 +183,11 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { fx.Supply(status.NewInformationProvider(statuscollector.Provider{})), fx.Provide(func() serializer.MetricSerializer { return nil }), compressionimpl.Module(), - demultiplexerimpl.Module(), - orchestratorForwarderImpl.Module(), - fx.Supply(orchestratorForwarderImpl.NewNoopParams()), + // Initializing the aggregator with a flush interval of 0 (to disable the flush goroutines) + demultiplexerimpl.Module(demultiplexerimpl.NewDefaultParams(demultiplexerimpl.WithFlushInterval(0))), + orchestratorForwarderImpl.Module(orchestratorForwarderImpl.NewNoopParams()), eventplatformimpl.Module(eventplatforParams), eventplatformreceiverimpl.Module(), - fx.Provide(func() demultiplexerimpl.Params { - // Initializing the aggregator with a flush interval of 0 (to disable the flush goroutines) - params := demultiplexerimpl.NewDefaultParams() - params.FlushInterval = 0 - return params - }), - fx.Supply( status.Params{ PythonVersionGetFunc: python.GetPythonVersion, @@ -275,15 +268,15 @@ func run( previousIntegrationTracing := false previousIntegrationTracingExhaustive := false if cliParams.generateIntegrationTraces { - if pkgconfig.Datadog().IsSet("integration_tracing") { - previousIntegrationTracing = pkgconfig.Datadog().GetBool("integration_tracing") + if pkgconfigsetup.Datadog().IsSet("integration_tracing") { + previousIntegrationTracing = pkgconfigsetup.Datadog().GetBool("integration_tracing") } - if pkgconfig.Datadog().IsSet("integration_tracing_exhaustive") { - previousIntegrationTracingExhaustive = pkgconfig.Datadog().GetBool("integration_tracing_exhaustive") + if pkgconfigsetup.Datadog().IsSet("integration_tracing_exhaustive") { + previousIntegrationTracingExhaustive = pkgconfigsetup.Datadog().GetBool("integration_tracing_exhaustive") } - pkgconfig.Datadog().Set("integration_tracing", true, model.SourceAgentRuntime) - pkgconfig.Datadog().Set("integration_tracing_exhaustive", true, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("integration_tracing", true, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("integration_tracing_exhaustive", true, model.SourceAgentRuntime) } if len(cliParams.args) != 0 { @@ -298,7 +291,7 @@ func run( pkgcollector.InitPython(common.GetPythonPaths()...) 
commonchecks.RegisterChecks(wmeta, config, telemetry) - common.LoadComponents(secretResolver, wmeta, ac, pkgconfig.Datadog().GetString("confd_path")) + common.LoadComponents(secretResolver, wmeta, ac, pkgconfigsetup.Datadog().GetString("confd_path")) ac.LoadAndRun(context.Background()) // Create the CheckScheduler, but do not attach it to @@ -624,8 +617,8 @@ func run( } if cliParams.generateIntegrationTraces { - pkgconfig.Datadog().Set("integration_tracing", previousIntegrationTracing, model.SourceAgentRuntime) - pkgconfig.Datadog().Set("integration_tracing_exhaustive", previousIntegrationTracingExhaustive, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("integration_tracing", previousIntegrationTracing, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("integration_tracing_exhaustive", previousIntegrationTracingExhaustive, model.SourceAgentRuntime) } return nil diff --git a/pkg/cli/subcommands/clusterchecks/command.go b/pkg/cli/subcommands/clusterchecks/command.go index 6a29ac12c7cd3..1b41623dd4a04 100644 --- a/pkg/cli/subcommands/clusterchecks/command.go +++ b/pkg/cli/subcommands/clusterchecks/command.go @@ -22,7 +22,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/flare" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -130,7 +130,7 @@ func rebalance(_ log.Component, config config.Component, cliParams *cliParams) e fmt.Println("Requesting a cluster check rebalance...") c := util.GetClient(false) // FIX: get certificates right then make this true - urlstr := fmt.Sprintf("https://localhost:%v/api/v1/clusterchecks/rebalance", pkgconfig.Datadog().GetInt("cluster_agent.cmd_port")) + urlstr := fmt.Sprintf("https://localhost:%v/api/v1/clusterchecks/rebalance", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) // Set session token err := util.SetAuthToken(config) @@ -183,7 +183,7 @@ func isolate(_ log.Component, config config.Component, cliParams *cliParams) err if cliParams.checkID == "" { return fmt.Errorf("checkID must be specified") } - urlstr := fmt.Sprintf("https://localhost:%v/api/v1/clusterchecks/isolate/check/%s", pkgconfig.Datadog().GetInt("cluster_agent.cmd_port"), cliParams.checkID) + urlstr := fmt.Sprintf("https://localhost:%v/api/v1/clusterchecks/isolate/check/%s", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port"), cliParams.checkID) // Set session token err := util.SetAuthToken(config) diff --git a/pkg/cli/subcommands/dcaflare/command.go b/pkg/cli/subcommands/dcaflare/command.go index 102261f10664c..30a00d7f19f3d 100644 --- a/pkg/cli/subcommands/dcaflare/command.go +++ b/pkg/cli/subcommands/dcaflare/command.go @@ -23,9 +23,9 @@ import ( "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/settings" settingshttp "github.com/DataDog/datadog-agent/pkg/config/settings/http" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/flare" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/input" @@ -110,7 +110,7 @@ func 
readProfileData(seconds int) (flare.ProfileData, error) { c := util.GetClient(false) fmt.Fprintln(color.Output, color.BlueString("Getting a %ds profile snapshot from datadog-cluster-agent.", seconds)) - pprofURL := fmt.Sprintf("http://127.0.0.1:%d/debug/pprof", pkgconfig.Datadog().GetInt("expvar_port")) + pprofURL := fmt.Sprintf("http://127.0.0.1:%d/debug/pprof", pkgconfigsetup.Datadog().GetInt("expvar_port")) for _, prof := range []struct{ name, URL string }{ { @@ -156,9 +156,9 @@ func run(cliParams *cliParams, _ config.Component) error { e error ) c := util.GetClient(false) // FIX: get certificates right then make this true - urlstr := fmt.Sprintf("https://localhost:%v/flare", pkgconfig.Datadog().GetInt("cluster_agent.cmd_port")) + urlstr := fmt.Sprintf("https://localhost:%v/flare", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) - logFile := pkgconfig.Datadog().GetString("log_file") + logFile := pkgconfigsetup.Datadog().GetString("log_file") if logFile == "" { logFile = path.DefaultDCALogFile } @@ -189,7 +189,7 @@ func run(cliParams *cliParams, _ config.Component) error { return nil } - if e = util.SetAuthToken(pkgconfig.Datadog()); e != nil { + if e = util.SetAuthToken(pkgconfigsetup.Datadog()); e != nil { return e } @@ -226,7 +226,7 @@ func run(cliParams *cliParams, _ config.Component) error { } } - response, e := flare.SendFlare(pkgconfig.Datadog(), filePath, cliParams.caseID, cliParams.email, helpers.NewLocalFlareSource()) + response, e := flare.SendFlare(pkgconfigsetup.Datadog(), filePath, cliParams.caseID, cliParams.email, helpers.NewLocalFlareSource()) fmt.Println(response) if e != nil { return e @@ -239,7 +239,7 @@ func newSettingsClient() (settings.Client, error) { apiConfigURL := fmt.Sprintf( "https://localhost:%v/config", - pkgconfig.Datadog().GetInt("cluster_agent.cmd_port"), + pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port"), ) return settingshttp.NewClient(c, apiConfigURL, "datadog-cluster-agent", settingshttp.NewHTTPClientOptions(util.LeaveConnectionOpen)), nil diff --git a/pkg/cli/subcommands/health/command.go b/pkg/cli/subcommands/health/command.go index f4d1233d61d99..21e0c0efb48c8 100644 --- a/pkg/cli/subcommands/health/command.go +++ b/pkg/cli/subcommands/health/command.go @@ -24,7 +24,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -73,16 +73,16 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { func requestHealth(_ log.Component, config config.Component, cliParams *cliParams) error { c := util.GetClient(false) // FIX: get certificates right then make this true - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } var urlstr string if flavor.GetFlavor() == flavor.ClusterAgent { - urlstr = fmt.Sprintf("https://%v:%v/status/health", ipcAddress, pkgconfig.Datadog().GetInt("cluster_agent.cmd_port")) + urlstr = fmt.Sprintf("https://%v:%v/status/health", ipcAddress, pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) } else { - urlstr = fmt.Sprintf("https://%v:%v/agent/status/health", ipcAddress, 
pkgconfig.Datadog().GetInt("cmd_port")) + urlstr = fmt.Sprintf("https://%v:%v/agent/status/health", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")) } // Set session token diff --git a/pkg/cli/subcommands/taggerlist/command.go b/pkg/cli/subcommands/taggerlist/command.go index 3ff65b7305f17..7b1e239c30574 100644 --- a/pkg/cli/subcommands/taggerlist/command.go +++ b/pkg/cli/subcommands/taggerlist/command.go @@ -16,7 +16,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/api" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -85,16 +85,16 @@ func taggerList(_ log.Component, config config.Component, _ *cliParams) error { } func getTaggerURL(_ config.Component) (string, error) { - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return "", err } var urlstr string if flavor.GetFlavor() == flavor.ClusterAgent { - urlstr = fmt.Sprintf("https://%v:%v/tagger-list", ipcAddress, pkgconfig.Datadog().GetInt("cluster_agent.cmd_port")) + urlstr = fmt.Sprintf("https://%v:%v/tagger-list", ipcAddress, pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) } else { - urlstr = fmt.Sprintf("https://%v:%v/agent/tagger-list", ipcAddress, pkgconfig.Datadog().GetInt("cmd_port")) + urlstr = fmt.Sprintf("https://%v:%v/agent/tagger-list", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")) } return urlstr, nil diff --git a/pkg/cli/subcommands/workloadlist/command.go b/pkg/cli/subcommands/workloadlist/command.go index d3576bad8c509..931f698c7aebd 100644 --- a/pkg/cli/subcommands/workloadlist/command.go +++ b/pkg/cli/subcommands/workloadlist/command.go @@ -17,7 +17,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -112,16 +112,16 @@ func workloadList(_ log.Component, config config.Component, cliParams *cliParams } func workloadURL(verbose bool) (string, error) { - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return "", err } var prefix string if flavor.GetFlavor() == flavor.ClusterAgent { - prefix = fmt.Sprintf("https://%v:%v/workload-list", ipcAddress, pkgconfig.Datadog().GetInt("cluster_agent.cmd_port")) + prefix = fmt.Sprintf("https://%v:%v/workload-list", ipcAddress, pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) } else { - prefix = fmt.Sprintf("https://%v:%v/agent/workload-list", ipcAddress, pkgconfig.Datadog().GetInt("cmd_port")) + prefix = fmt.Sprintf("https://%v:%v/agent/workload-list", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")) } if verbose { diff --git a/pkg/cloudfoundry/containertagger/container_tagger.go b/pkg/cloudfoundry/containertagger/container_tagger.go index 6915d0f10d694..a5f1805e6f86a 100644 --- a/pkg/cloudfoundry/containertagger/container_tagger.go +++ b/pkg/cloudfoundry/containertagger/container_tagger.go 
@@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/utils" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" hostMetadataUtils "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/hosttags" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/cloudfoundry" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -47,8 +47,8 @@ func NewContainerTagger(wmeta workloadmeta.Component) (*ContainerTagger, error) return nil, err } - retryCount := config.Datadog().GetInt("cloud_foundry_container_tagger.retry_count") - retryInterval := time.Second * time.Duration(config.Datadog().GetInt("cloud_foundry_container_tagger.retry_interval")) + retryCount := pkgconfigsetup.Datadog().GetInt("cloud_foundry_container_tagger.retry_count") + retryInterval := time.Second * time.Duration(pkgconfigsetup.Datadog().GetInt("cloud_foundry_container_tagger.retry_interval")) return &ContainerTagger{ gardenUtil: gu, @@ -106,7 +106,7 @@ func (c *ContainerTagger) processEvent(ctx context.Context, evt workloadmeta.Eve log.Debugf("Processing Event (id %s): %+v", eventID, storeContainer) // extract tags - hostTags := hostMetadataUtils.Get(ctx, true, config.Datadog()) + hostTags := hostMetadataUtils.Get(ctx, true, pkgconfigsetup.Datadog()) tags := storeContainer.CollectorTags tags = append(tags, hostTags.System...) tags = append(tags, hostTags.GoogleCloudPlatform...) @@ -161,7 +161,7 @@ func (c *ContainerTagger) processEvent(ctx context.Context, evt workloadmeta.Eve // updateTagsInContainer runs a script inside the container that handles updating the agent with the given tags func updateTagsInContainer(container garden.Container, tags []string) (int, error) { //nolint:revive // TODO(PLINT) Fix revive linter - shell_path := config.Datadog().GetString("cloud_foundry_container_tagger.shell_path") + shell_path := pkgconfigsetup.Datadog().GetString("cloud_foundry_container_tagger.shell_path") process, err := container.Run(garden.ProcessSpec{ Path: shell_path, Args: []string{"/home/vcap/app/.datadog/scripts/update_agent_config.sh"}, diff --git a/pkg/clusteragent/admission/controllers/webhook/config.go b/pkg/clusteragent/admission/controllers/webhook/config.go index 477a41c787439..697dd68a8805b 100644 --- a/pkg/clusteragent/admission/controllers/webhook/config.go +++ b/pkg/clusteragent/admission/controllers/webhook/config.go @@ -13,7 +13,7 @@ import ( "fmt" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" ) @@ -35,16 +35,16 @@ type Config struct { // NewConfig creates a webhook controller configuration func NewConfig(admissionV1Enabled, namespaceSelectorEnabled bool) Config { return Config{ - webhookName: config.Datadog().GetString("admission_controller.webhook_name"), - secretName: config.Datadog().GetString("admission_controller.certificate.secret_name"), + webhookName: pkgconfigsetup.Datadog().GetString("admission_controller.webhook_name"), + secretName: pkgconfigsetup.Datadog().GetString("admission_controller.certificate.secret_name"), namespace: common.GetResourcesNamespace(), admissionV1Enabled: admissionV1Enabled, namespaceSelectorEnabled: namespaceSelectorEnabled, - svcName: config.Datadog().GetString("admission_controller.service_name"), + svcName: 
pkgconfigsetup.Datadog().GetString("admission_controller.service_name"), svcPort: int32(443), - timeout: config.Datadog().GetInt32("admission_controller.timeout_seconds"), - failurePolicy: config.Datadog().GetString("admission_controller.failure_policy"), - reinvocationPolicy: config.Datadog().GetString("admission_controller.reinvocation_policy"), + timeout: pkgconfigsetup.Datadog().GetInt32("admission_controller.timeout_seconds"), + failurePolicy: pkgconfigsetup.Datadog().GetString("admission_controller.failure_policy"), + reinvocationPolicy: pkgconfigsetup.Datadog().GetString("admission_controller.reinvocation_policy"), } } diff --git a/pkg/clusteragent/admission/controllers/webhook/controller_v1_test.go b/pkg/clusteragent/admission/controllers/webhook/controller_v1_test.go index b4c990f55459a..b8cc5e2d1ed5d 100644 --- a/pkg/clusteragent/admission/controllers/webhook/controller_v1_test.go +++ b/pkg/clusteragent/admission/controllers/webhook/controller_v1_test.go @@ -31,8 +31,9 @@ import ( workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/cwsinstrumentation" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/certificate" ) @@ -233,7 +234,7 @@ func TestGenerateTemplatesV1(t *testing.T) { matchPolicy := admiv1.Exact sideEffects := admiv1.SideEffectClassNone port := int32(443) - timeout := config.Datadog().GetInt32("admission_controller.timeout_seconds") + timeout := pkgconfigsetup.Datadog().GetInt32("admission_controller.timeout_seconds") webhook := func(name, path string, objSelector, nsSelector *metav1.LabelSelector, operations []admiv1.OperationType, resources []string) admiv1.MutatingWebhook { return admiv1.MutatingWebhook{ Name: name, @@ -267,13 +268,13 @@ func TestGenerateTemplatesV1(t *testing.T) { } tests := []struct { name string - setupConfig func(config.Config) + setupConfig func(model.Config) configFunc func() Config want func() []admiv1.MutatingWebhook }{ { name: "config injection, mutate all", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", false) @@ -296,7 +297,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "config injection, mutate labelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", false) @@ -315,7 +316,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "tags injection, mutate all", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", true) 
mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) @@ -338,7 +339,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "tags injection, mutate labelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) @@ -357,7 +358,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "lib injection, mutate all", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", false) @@ -380,7 +381,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "lib injection, mutate labelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", false) @@ -399,7 +400,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "config and tags injection, mutate labelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) mockConfig.SetWithoutSource("admission_controller.auto_instrumentation.enabled", false) @@ -422,7 +423,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "config and tags injection, mutate all", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) @@ -454,7 +455,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "namespace selector enabled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) @@ -479,7 +480,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "AKS-specific label selector without namespace selector enabled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.add_aks_selectors", true) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) @@ -528,7 +529,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "AKS-specific label selector with namespace selector enabled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.add_aks_selectors", true) 
mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", true) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) @@ -574,7 +575,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "cws instrumentation", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -615,7 +616,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "cws instrumentation, mutate unlabelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -652,7 +653,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "cws instrumentation, namespace selector", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", true) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -693,7 +694,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "cws instrumentation, namespace selector, mutate unlabelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", true) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -730,7 +731,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "agent sidecar injection, misconfigured profiles, supported provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -750,7 +751,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "agent sidecar injection, no selectors specified, supported provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -782,7 +783,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "agent sidecar injection, no selectors specified, unsupported provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -802,7 +803,7 @@ func TestGenerateTemplatesV1(t *testing.T) 
{ }, { name: "agent sidecar injection, no selectors specified, no provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -822,7 +823,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "agent sidecar injection, only single namespace selector, no provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -852,7 +853,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "agent sidecar injection, valid selector specified, unsupported provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -872,7 +873,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "agent sidecar injection, only single object selector, no provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -900,7 +901,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "agent sidecar injection, one object selector and one namespace selector, no provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -928,7 +929,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "agent sidecar injection, multiple selectors (should refuse to create webhook), provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -977,7 +978,7 @@ func TestGetWebhookSkeletonV1(t *testing.T) { sideEffects := admiv1.SideEffectClassNone port := int32(443) path := "/bar" - defaultTimeout := config.Datadog().GetInt32("admission_controller.timeout_seconds") + defaultTimeout := pkgconfigsetup.Datadog().GetInt32("admission_controller.timeout_seconds") customTimeout := int32(2) namespaceSelector, _ := common.DefaultLabelSelectors(true) _, objectSelector := common.DefaultLabelSelectors(false) diff --git a/pkg/clusteragent/admission/controllers/webhook/controller_v1beta1_test.go 
b/pkg/clusteragent/admission/controllers/webhook/controller_v1beta1_test.go index 540ce5fbc44d6..3a90da5138556 100644 --- a/pkg/clusteragent/admission/controllers/webhook/controller_v1beta1_test.go +++ b/pkg/clusteragent/admission/controllers/webhook/controller_v1beta1_test.go @@ -31,8 +31,9 @@ import ( workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/cwsinstrumentation" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/certificate" ) @@ -228,7 +229,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { matchPolicy := admiv1beta1.Exact sideEffects := admiv1beta1.SideEffectClassNone port := int32(443) - timeout := config.Datadog().GetInt32("admission_controller.timeout_seconds") + timeout := pkgconfigsetup.Datadog().GetInt32("admission_controller.timeout_seconds") webhook := func(name, path string, objSelector, nsSelector *metav1.LabelSelector, operations []admiv1beta1.OperationType, resources []string) admiv1beta1.MutatingWebhook { return admiv1beta1.MutatingWebhook{ Name: name, @@ -262,13 +263,13 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { } tests := []struct { name string - setupConfig func(config.Config) + setupConfig func(model.Config) configFunc func() Config want func() []admiv1beta1.MutatingWebhook }{ { name: "config injection, mutate all", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", false) @@ -291,7 +292,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "config injection, mutate labelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", false) @@ -310,7 +311,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "tags injection, mutate all", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) @@ -333,7 +334,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "tags injection, mutate labelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) @@ -352,7 +353,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "lib injection, mutate all", - setupConfig: func(mockConfig 
config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", false) @@ -375,7 +376,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "lib injection, mutate labelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", false) @@ -394,7 +395,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "config and tags injection, mutate labelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) mockConfig.SetWithoutSource("admission_controller.auto_instrumentation.enabled", false) @@ -417,7 +418,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "config and tags injection, mutate all", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) @@ -449,7 +450,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "namespace selector enabled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) @@ -474,7 +475,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "AKS-specific label selector without namespace selector enabled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.add_aks_selectors", true) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) @@ -522,7 +523,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "AKS-specific label selector with namespace selector enabled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.add_aks_selectors", true) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", true) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) @@ -567,7 +568,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "cws instrumentation", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -608,7 +609,7 @@ func 
TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "cws instrumentation, mutate unlabelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -645,7 +646,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "cws instrumentation, namespace selector", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", true) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -686,7 +687,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "cws instrumentation, namespace selector, mutate unlabelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", true) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -723,7 +724,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, misconfigured profiles, supported provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -743,7 +744,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, no selectors specified, supported provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -775,7 +776,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, no selectors specified, unsupported provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -795,7 +796,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, no selectors specified, no provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -815,7 +816,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, only single namespace selector, no provider specified", - 
setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -845,7 +846,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, valid selector specified, unsupported provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -865,7 +866,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, only single object selector, no provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -893,7 +894,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, one object selector and one namespace selector, no provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -921,7 +922,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, multiple selectors (should refuse to create webhook), provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -970,7 +971,7 @@ func TestGetWebhookSkeletonV1beta1(t *testing.T) { defaultReinvocationPolicy := admiv1beta1.IfNeededReinvocationPolicy port := int32(443) path := "/bar" - defaultTimeout := config.Datadog().GetInt32("admission_controller.timeout_seconds") + defaultTimeout := pkgconfigsetup.Datadog().GetInt32("admission_controller.timeout_seconds") customTimeout := int32(2) namespaceSelector, _ := common.DefaultLabelSelectors(true) _, objectSelector := common.DefaultLabelSelectors(false) diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/agent_sidecar.go b/pkg/clusteragent/admission/mutate/agent_sidecar/agent_sidecar.go index c63ac2b995d3b..544a0b0a3ec55 100644 --- a/pkg/clusteragent/admission/mutate/agent_sidecar/agent_sidecar.go +++ b/pkg/clusteragent/admission/mutate/agent_sidecar/agent_sidecar.go @@ -26,7 +26,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/cluster-agent/admission" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/metrics" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" apiCommon "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -60,8 +60,8 @@ func NewWebhook() *Webhook { return &Webhook{ name: webhookName, - isEnabled: config.Datadog().GetBool("admission_controller.agent_sidecar.enabled"), - endpoint: config.Datadog().GetString("admission_controller.agent_sidecar.endpoint"), + isEnabled: pkgconfigsetup.Datadog().GetBool("admission_controller.agent_sidecar.enabled"), + endpoint: pkgconfigsetup.Datadog().GetString("admission_controller.agent_sidecar.endpoint"), resources: []string{"pods"}, operations: []admiv1.OperationType{admiv1.Create}, namespaceSelector: nsSelector, @@ -157,11 +157,11 @@ func (w *Webhook) injectAgentSidecar(pod *corev1.Pod, _ string, _ dynamic.Interf func getDefaultSidecarTemplate(containerRegistry string) *corev1.Container { ddSite := os.Getenv("DD_SITE") if ddSite == "" { - ddSite = config.DefaultSite + ddSite = pkgconfigsetup.DefaultSite } - imageName := config.Datadog().GetString("admission_controller.agent_sidecar.image_name") - imageTag := config.Datadog().GetString("admission_controller.agent_sidecar.image_tag") + imageName := pkgconfigsetup.Datadog().GetString("admission_controller.agent_sidecar.image_name") + imageTag := pkgconfigsetup.Datadog().GetString("admission_controller.agent_sidecar.image_tag") agentContainer := &corev1.Container{ Env: []corev1.EnvVar{ @@ -195,7 +195,7 @@ func getDefaultSidecarTemplate(containerRegistry string) *corev1.Container { }, { Name: "DD_LANGUAGE_DETECTION_ENABLED", - Value: strconv.FormatBool(config.Datadog().GetBool("language_detection.enabled") && config.Datadog().GetBool("language_detection.reporting.enabled")), + Value: strconv.FormatBool(pkgconfigsetup.Datadog().GetBool("language_detection.enabled") && pkgconfigsetup.Datadog().GetBool("language_detection.reporting.enabled")), }, }, Image: fmt.Sprintf("%s/%s:%s", containerRegistry, imageName, imageTag), @@ -213,11 +213,11 @@ func getDefaultSidecarTemplate(containerRegistry string) *corev1.Container { }, } - clusterAgentEnabled := config.Datadog().GetBool("admission_controller.agent_sidecar.cluster_agent.enabled") + clusterAgentEnabled := pkgconfigsetup.Datadog().GetBool("admission_controller.agent_sidecar.cluster_agent.enabled") if clusterAgentEnabled { - clusterAgentCmdPort := config.Datadog().GetInt("cluster_agent.cmd_port") - clusterAgentServiceName := config.Datadog().GetString("cluster_agent.kubernetes_service_name") + clusterAgentCmdPort := pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port") + clusterAgentServiceName := pkgconfigsetup.Datadog().GetString("cluster_agent.kubernetes_service_name") _, _ = withEnvOverrides(agentContainer, corev1.EnvVar{ Name: "DD_CLUSTER_AGENT_ENABLED", @@ -247,7 +247,7 @@ func getDefaultSidecarTemplate(containerRegistry string) *corev1.Container { // labelSelectors returns the mutating webhooks object selectors based on the configuration func labelSelectors() (namespaceSelector, objectSelector *metav1.LabelSelector) { // Read and parse selectors - selectorsJSON := config.Datadog().GetString("admission_controller.agent_sidecar.selectors") + selectorsJSON := pkgconfigsetup.Datadog().GetString("admission_controller.agent_sidecar.selectors") // Get sidecar profiles _, err := loadSidecarProfiles() @@ -269,7 +269,7 @@ func labelSelectors() (namespaceSelector, objectSelector *metav1.LabelSelector) return nil, nil } - 
provider := config.Datadog().GetString("admission_controller.agent_sidecar.provider") + provider := pkgconfigsetup.Datadog().GetString("admission_controller.agent_sidecar.provider") if !providerIsSupported(provider) { log.Errorf("agent sidecar provider is not supported: %v", provider) return nil, nil diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/profiles.go b/pkg/clusteragent/admission/mutate/agent_sidecar/profiles.go index 628d75e76450f..c7367f62919d1 100644 --- a/pkg/clusteragent/admission/mutate/agent_sidecar/profiles.go +++ b/pkg/clusteragent/admission/mutate/agent_sidecar/profiles.go @@ -13,7 +13,7 @@ import ( corev1 "k8s.io/api/core/v1" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) //////////////////////////////// @@ -34,7 +34,7 @@ type ProfileOverride struct { // one profile is configured func loadSidecarProfiles() ([]ProfileOverride, error) { // Read and parse profiles - profilesJSON := config.Datadog().GetString("admission_controller.agent_sidecar.profiles") + profilesJSON := pkgconfigsetup.Datadog().GetString("admission_controller.agent_sidecar.profiles") var profiles []ProfileOverride diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go b/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go index 8e8d424e17eef..0f03cadbb8509 100644 --- a/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go +++ b/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go @@ -9,12 +9,13 @@ package agentsidecar import ( "fmt" + "slices" corev1 "k8s.io/api/core/v1" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" configWebhook "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/config" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/pointer" ) @@ -33,6 +34,12 @@ const dogstatsdSocket = socketDir + "/dsd.socket" // webhook to distinguish them easily. const ddSocketsVolumeName = "ddsockets" +var volumeNamesInjectedByConfigWebhook = []string{ + configWebhook.DatadogVolumeName, + configWebhook.DogstatsdSocketVolumeName, + configWebhook.TraceAgentSocketVolumeName, +} + // providerIsSupported indicates whether the provider is supported by agent sidecar injection func providerIsSupported(provider string) bool { switch provider { @@ -49,7 +56,7 @@ func providerIsSupported(provider string) bool { // applyProviderOverrides applies the necessary overrides for the provider // configured. It returns a boolean that indicates if the pod was mutated. 
func applyProviderOverrides(pod *corev1.Pod) (bool, error) { - provider := config.Datadog().GetString("admission_controller.agent_sidecar.provider") + provider := pkgconfigsetup.Datadog().GetString("admission_controller.agent_sidecar.provider") if !providerIsSupported(provider) { return false, fmt.Errorf("unsupported provider: %v", provider) @@ -85,10 +92,7 @@ func applyFargateOverrides(pod *corev1.Pod) (bool, error) { return false, fmt.Errorf("can't apply profile overrides to nil pod") } - mutated := false - - deleted := deleteConfigWebhookVolumeAndMounts(pod) - mutated = mutated || deleted + mutated := deleteConfigWebhookVolumesAndMounts(pod) volume, volumeMount := socketsVolume() injected := common.InjectVolume(pod, volume, volumeMount) @@ -174,20 +178,19 @@ func socketsVolume() (corev1.Volume, corev1.VolumeMount) { return volume, volumeMount } -// deleteConfigWebhookVolumeAndMounts deletes the volume and volumeMounts added +// deleteConfigWebhookVolumesAndMounts deletes the volume and volumeMounts added // by the config webhook. Returns a boolean that indicates if the pod was // mutated. -func deleteConfigWebhookVolumeAndMounts(pod *corev1.Pod) bool { - mutated := false - +func deleteConfigWebhookVolumesAndMounts(pod *corev1.Pod) bool { + originalNumberOfVolumes := len(pod.Spec.Volumes) // Delete the volume added by the config webhook - for i, vol := range pod.Spec.Volumes { - if vol.Name == configWebhook.DatadogVolumeName { - pod.Spec.Volumes = append(pod.Spec.Volumes[:i], pod.Spec.Volumes[i+1:]...) - mutated = true - break - } - } + pod.Spec.Volumes = slices.DeleteFunc( + pod.Spec.Volumes, + func(volume corev1.Volume) bool { + return slices.Contains(volumeNamesInjectedByConfigWebhook, volume.Name) + }, + ) + mutated := len(pod.Spec.Volumes) != originalNumberOfVolumes deleted := deleteConfigWebhookVolumeMounts(pod.Spec.Containers) mutated = mutated || deleted @@ -204,16 +207,11 @@ func deleteConfigWebhookVolumeMounts(containers []corev1.Container) bool { mutated := false for i, container := range containers { - for j, volMount := range container.VolumeMounts { - if volMount.Name == configWebhook.DatadogVolumeName { - containers[i].VolumeMounts = append( - containers[i].VolumeMounts[:j], - containers[i].VolumeMounts[j+1:]..., - ) - mutated = true - break - } - } + originalNumberOfVolMounts := len(container.VolumeMounts) + containers[i].VolumeMounts = slices.DeleteFunc(container.VolumeMounts, func(volMount corev1.VolumeMount) bool { + return slices.Contains(volumeNamesInjectedByConfigWebhook, volMount.Name) + }) + mutated = mutated || len(container.VolumeMounts) != originalNumberOfVolMounts } return mutated diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/providers_test.go b/pkg/clusteragent/admission/mutate/agent_sidecar/providers_test.go index ee8ceb1a544f5..236d9d3257031 100644 --- a/pkg/clusteragent/admission/mutate/agent_sidecar/providers_test.go +++ b/pkg/clusteragent/admission/mutate/agent_sidecar/providers_test.go @@ -59,7 +59,6 @@ func TestProviderIsSupported(t *testing.T) { func TestApplyProviderOverrides(t *testing.T) { mockConfig := configmock.New(t) - hostPathType := corev1.HostPathDirectoryOrCreate tests := []struct { name string @@ -170,7 +169,7 @@ func TestApplyProviderOverrides(t *testing.T) { { // This test checks that the volume and volume mounts set by the // config webhook are replaced by ones that works on Fargate. 
- name: "fargate provider - with volume set by the config webhook", + name: "fargate provider - with volume set by the config webhook (when the type is not socket)", provider: "fargate", basePod: &corev1.Pod{ Spec: corev1.PodSpec{ @@ -201,7 +200,7 @@ func TestApplyProviderOverrides(t *testing.T) { Name: "datadog", VolumeSource: corev1.VolumeSource{ HostPath: &corev1.HostPathVolumeSource{ - Type: &hostPathType, + Type: pointer.Ptr(corev1.HostPathDirectoryOrCreate), Path: "/var/run/datadog", }, }, @@ -276,6 +275,134 @@ func TestApplyProviderOverrides(t *testing.T) { expectError: false, expectMutated: true, }, + { + // Same as the previous test, but this time the injected volumes are + // of socket type. + name: "fargate provider - with volumes set by the config webhook (when the type is socket)", + provider: "fargate", + basePod: &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "app-container", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "datadog-dogstatsd", + MountPath: "/var/run/datadog/dsd.socket", + ReadOnly: true, + }, + { + Name: "datadog-trace-agent", + MountPath: "/var/run/datadog/apm.socket", + ReadOnly: true, + }, + }, + }, + { + Name: agentSidecarContainerName, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "datadog-dogstatsd", + MountPath: "/var/run/datadog/dsd.socket", + ReadOnly: true, + }, + { + Name: "datadog-trace-agent", + MountPath: "/var/run/datadog/apm.socket", + ReadOnly: true, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "datadog-dogstatsd", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/var/run/datadog/dsd.socket", + Type: pointer.Ptr(corev1.HostPathSocket), + }, + }, + }, + { + Name: "datadog-trace-agent", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/var/run/datadog/apm.socket", + Type: pointer.Ptr(corev1.HostPathSocket), + }, + }, + }, + }, + }, + }, + expectedPodAfterOverride: &corev1.Pod{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + mutatecommon.K8sAutoscalerSafeToEvictVolumesAnnotation: "ddsockets", + }, + }, + Spec: corev1.PodSpec{ + ShareProcessNamespace: pointer.Ptr(true), + Containers: []corev1.Container{ + { + Name: "app-container", + Env: []corev1.EnvVar{ + { + Name: "DD_TRACE_AGENT_URL", + Value: "unix:///var/run/datadog/apm.socket", + }, + { + Name: "DD_DOGSTATSD_URL", + Value: "unix:///var/run/datadog/dsd.socket", + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "ddsockets", + MountPath: "/var/run/datadog", + ReadOnly: false, + }, + }, + }, + { + Name: agentSidecarContainerName, + Env: []corev1.EnvVar{ + { + Name: "DD_EKS_FARGATE", + Value: "true", + }, + { + Name: "DD_APM_RECEIVER_SOCKET", + Value: "/var/run/datadog/apm.socket", + }, + { + Name: "DD_DOGSTATSD_SOCKET", + Value: "/var/run/datadog/dsd.socket", + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "ddsockets", + MountPath: "/var/run/datadog", + ReadOnly: false, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "ddsockets", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + }, + expectError: false, + expectMutated: true, + }, { name: "unsupported provider", provider: "foo-provider", diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation.go index 2ea1076879239..909612cdd3b11 100644 --- 
a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation.go @@ -29,7 +29,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/common" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/metrics" mutatecommon "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -41,7 +41,7 @@ const ( // defaultMilliCPURequest defines default milli cpu request number. defaultMilliCPURequest int64 = 50 // 0.05 core // defaultMemoryRequest defines default memory request size. - defaultMemoryRequest int64 = 20 * 1024 * 1024 // 20 MB + defaultMemoryRequest int64 = 100 * 1024 * 1024 // 100 MB (recommended minimum by Alpine) webhookName = "lib_injection" ) @@ -84,13 +84,13 @@ func NewWebhook(wmeta workloadmeta.Component, filter mutatecommon.InjectionFilte return nil, err } - v, err := instrumentationVersion(config.Datadog().GetString("apm_config.instrumentation.version")) + v, err := instrumentationVersion(pkgconfigsetup.Datadog().GetString("apm_config.instrumentation.version")) if err != nil { return nil, fmt.Errorf("invalid version for key apm_config.instrumentation.version: %w", err) } var ( - isEnabled = config.Datadog().GetBool("admission_controller.auto_instrumentation.enabled") + isEnabled = pkgconfigsetup.Datadog().GetBool("admission_controller.auto_instrumentation.enabled") containerRegistry = mutatecommon.ContainerRegistry("admission_controller.auto_instrumentation.container_registry") pinnedLibraries []libInfo ) @@ -102,14 +102,14 @@ func NewWebhook(wmeta workloadmeta.Component, filter mutatecommon.InjectionFilte return &Webhook{ name: webhookName, isEnabled: isEnabled, - endpoint: config.Datadog().GetString("admission_controller.auto_instrumentation.endpoint"), + endpoint: pkgconfigsetup.Datadog().GetString("admission_controller.auto_instrumentation.endpoint"), resources: []string{"pods"}, operations: []admiv1.OperationType{admiv1.Create}, initSecurityContext: initSecurityContext, initResourceRequirements: initResourceRequirements, injectionFilter: filter, containerRegistry: containerRegistry, - injectorImageTag: config.Datadog().GetString("apm_config.instrumentation.injector_image_tag"), + injectorImageTag: pkgconfigsetup.Datadog().GetString("apm_config.instrumentation.injector_image_tag"), pinnedLibraries: pinnedLibraries, version: v, wmeta: wmeta, @@ -228,7 +228,7 @@ func (w *Webhook) inject(pod *corev1.Pod, ns string, _ dynamic.Interface) (bool, // * false - product disactivated, not overridable remotely func securityClientLibraryConfigMutators() []podMutator { boolVal := func(key string) string { - return strconv.FormatBool(config.Datadog().GetBool(key)) + return strconv.FormatBool(pkgconfigsetup.Datadog().GetBool(key)) } return []podMutator{ configKeyEnvVarMutator{ @@ -259,7 +259,7 @@ func profilingClientLibraryConfigMutators() []podMutator { configKeyEnvVarMutator{ envKey: "DD_PROFILING_ENABLED", configKey: "admission_controller.auto_instrumentation.profiling.enabled", - getVal: config.Datadog().GetString, + getVal: pkgconfigsetup.Datadog().GetString, }, } } @@ -289,7 +289,7 @@ func injectApmTelemetryConfig(pod *corev1.Pod) { func getPinnedLibraries(registry string) []libInfo { // If APM Instrumentation is enabled and 
configuration apm_config.instrumentation.lib_versions specified, // inject only the libraries from the configuration - singleStepLibraryVersions := config.Datadog(). + singleStepLibraryVersions := pkgconfigsetup.Datadog(). GetStringMapString("apm_config.instrumentation.lib_versions") var res []libInfo @@ -351,14 +351,14 @@ func (l *libInfoLanguageDetection) containerMutator(v version) containerMutator // The languages information is available in workloadmeta-store // and attached on the pod's owner. func (w *Webhook) getLibrariesLanguageDetection(pod *corev1.Pod) *libInfoLanguageDetection { - if !config.Datadog().GetBool("language_detection.enabled") || - !config.Datadog().GetBool("language_detection.reporting.enabled") { + if !pkgconfigsetup.Datadog().GetBool("language_detection.enabled") || + !pkgconfigsetup.Datadog().GetBool("language_detection.reporting.enabled") { return nil } return &libInfoLanguageDetection{ libs: w.getAutoDetectedLibraries(pod), - injectionEnabled: config.Datadog().GetBool("admission_controller.auto_instrumentation.inject_auto_detected_libraries"), + injectionEnabled: pkgconfigsetup.Datadog().GetBool("admission_controller.auto_instrumentation.inject_auto_detected_libraries"), } } @@ -651,8 +651,8 @@ func initResources() (corev1.ResourceRequirements, error) { var resources = corev1.ResourceRequirements{Limits: corev1.ResourceList{}, Requests: corev1.ResourceList{}} - if config.Datadog().IsSet("admission_controller.auto_instrumentation.init_resources.cpu") { - quantity, err := resource.ParseQuantity(config.Datadog().GetString("admission_controller.auto_instrumentation.init_resources.cpu")) + if pkgconfigsetup.Datadog().IsSet("admission_controller.auto_instrumentation.init_resources.cpu") { + quantity, err := resource.ParseQuantity(pkgconfigsetup.Datadog().GetString("admission_controller.auto_instrumentation.init_resources.cpu")) if err != nil { return resources, err } @@ -663,8 +663,8 @@ func initResources() (corev1.ResourceRequirements, error) { resources.Limits[corev1.ResourceCPU] = *resource.NewMilliQuantity(defaultMilliCPURequest, resource.DecimalSI) } - if config.Datadog().IsSet("admission_controller.auto_instrumentation.init_resources.memory") { - quantity, err := resource.ParseQuantity(config.Datadog().GetString("admission_controller.auto_instrumentation.init_resources.memory")) + if pkgconfigsetup.Datadog().IsSet("admission_controller.auto_instrumentation.init_resources.memory") { + quantity, err := resource.ParseQuantity(pkgconfigsetup.Datadog().GetString("admission_controller.auto_instrumentation.init_resources.memory")) if err != nil { return resources, err } @@ -682,8 +682,8 @@ func parseInitSecurityContext() (*corev1.SecurityContext, error) { securityContext := corev1.SecurityContext{} confKey := "admission_controller.auto_instrumentation.init_security_context" - if config.Datadog().IsSet(confKey) { - confValue := config.Datadog().GetString(confKey) + if pkgconfigsetup.Datadog().IsSet(confKey) { + confValue := pkgconfigsetup.Datadog().GetString(confKey) err := json.Unmarshal([]byte(confValue), &securityContext) if err != nil { return nil, fmt.Errorf("failed to get init security context from configuration, %s=`%s`: %v", confKey, confValue, err) diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go index d7b217f755020..2e776c6c78b3b 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go +++ 
b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go @@ -640,7 +640,7 @@ func TestExtractLibInfo(t *testing.T) { }, { lang: "dotnet", - image: "registry/dd-lib-dotnet-init:v2", + image: "registry/dd-lib-dotnet-init:v3", }, { lang: "ruby", @@ -1098,7 +1098,7 @@ func TestInjectLibInitContainer(t *testing.T) { lang: java, wantErr: false, wantCPU: "50m", - wantMem: "20Mi", + wantMem: "100Mi", secCtx: &corev1.SecurityContext{}, }, { @@ -1121,7 +1121,7 @@ func TestInjectLibInitContainer(t *testing.T) { lang: java, wantErr: false, wantCPU: "200m", - wantMem: "20Mi", + wantMem: "100Mi", secCtx: &corev1.SecurityContext{}, }, { @@ -1143,7 +1143,7 @@ func TestInjectLibInitContainer(t *testing.T) { lang: java, wantErr: true, wantCPU: "50m", - wantMem: "20Mi", + wantMem: "100Mi", secCtx: &corev1.SecurityContext{}, }, { @@ -1153,7 +1153,7 @@ func TestInjectLibInitContainer(t *testing.T) { lang: java, wantErr: false, wantCPU: "50m", - wantMem: "20Mi", + wantMem: "100Mi", secCtx: &corev1.SecurityContext{ Capabilities: &corev1.Capabilities{ Add: []corev1.Capability{"NET_ADMIN", "SYS_TIME"}, @@ -1191,7 +1191,7 @@ func TestInjectLibInitContainer(t *testing.T) { lang: java, wantErr: false, wantCPU: "50m", - wantMem: "20Mi", + wantMem: "100Mi", secCtx: &corev1.SecurityContext{ Capabilities: &corev1.Capabilities{ Drop: []corev1.Capability{"ALL"}, @@ -1358,7 +1358,7 @@ func TestInjectAutoInstrumentation(t *testing.T) { "java": "v1", "python": "v2", "ruby": "v2", - "dotnet": "v2", + "dotnet": "v3", "js": "v5", } diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/injection_filter.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/injection_filter.go index 8f2c542e72705..de75b2b4e290e 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/injection_filter.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/injection_filter.go @@ -11,7 +11,7 @@ import ( "fmt" mutatecommon "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" apiServerCommon "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -45,7 +45,7 @@ type injectionFilter struct { // This DOES NOT respect `mutate_unlabelled` since it is a namespace // specific check. func (f *injectionFilter) IsNamespaceEligible(ns string) bool { - apmInstrumentationEnabled := config.Datadog().GetBool("apm_config.instrumentation.enabled") + apmInstrumentationEnabled := pkgconfigsetup.Datadog().GetBool("apm_config.instrumentation.enabled") if !apmInstrumentationEnabled { log.Debugf("APM Instrumentation is disabled") @@ -85,8 +85,8 @@ func (f *injectionFilter) Err() error { // are not one of the ones disabled by default. // - Enabled and disabled namespaces: return error. 
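[Editor's note: the doc comment above ends with the rule that enabled and disabled namespace lists cannot both be set. A minimal sketch of just that mutual-exclusion check is below; it does not reproduce the containers.Filter construction that follows it in makeAPMSSINamespaceFilter.]

package main

import (
	"errors"
	"fmt"
)

// validateNamespaceLists mirrors the first check in makeAPMSSINamespaceFilter:
// the two namespace lists are mutually exclusive.
func validateNamespaceLists(enabled, disabled []string) error {
	if len(enabled) > 0 && len(disabled) > 0 {
		return errors.New("apm_config.instrumentation.enabled_namespaces and apm_config.instrumentation.disabled_namespaces cannot be set together")
	}
	return nil
}

func main() {
	err := validateNamespaceLists([]string{"payments"}, []string{"kube-system"})
	fmt.Println(err) // both lists set -> error
}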
func makeAPMSSINamespaceFilter() (*containers.Filter, error) { - apmEnabledNamespaces := config.Datadog().GetStringSlice("apm_config.instrumentation.enabled_namespaces") - apmDisabledNamespaces := config.Datadog().GetStringSlice("apm_config.instrumentation.disabled_namespaces") + apmEnabledNamespaces := pkgconfigsetup.Datadog().GetStringSlice("apm_config.instrumentation.enabled_namespaces") + apmDisabledNamespaces := pkgconfigsetup.Datadog().GetStringSlice("apm_config.instrumentation.disabled_namespaces") if len(apmEnabledNamespaces) > 0 && len(apmDisabledNamespaces) > 0 { return nil, fmt.Errorf("apm.instrumentation.enabled_namespaces and apm.instrumentation.disabled_namespaces configuration cannot be set together") diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/language_versions.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/language_versions.go index dccaf9fa8c9f3..7ddaace808661 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/language_versions.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/language_versions.go @@ -113,7 +113,7 @@ func (l language) isSupported() bool { // If this language does not appear in supportedLanguages, it will not be injected. var languageVersions = map[language]string{ java: "v1", // https://datadoghq.atlassian.net/browse/APMON-1064 - dotnet: "v2", // https://datadoghq.atlassian.net/browse/APMON-1067 + dotnet: "v3", // https://datadoghq.atlassian.net/browse/APMON-1390 python: "v2", // https://datadoghq.atlassian.net/browse/APMON-1068 ruby: "v2", // https://datadoghq.atlassian.net/browse/APMON-1066 js: "v5", // https://datadoghq.atlassian.net/browse/APMON-1065 diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/mutators.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/mutators.go index ecd3119fd6b0b..9debc63664d05 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/mutators.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/mutators.go @@ -11,7 +11,7 @@ import ( corev1 "k8s.io/api/core/v1" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // containerMutator describes something that can mutate a container. 
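[Editor's note: the dotnet default bump to v3 above feeds into the init-container image tag the webhook injects. A rough sketch of how a default-version map like languageVersions translates into an image reference; the dd-lib-<lang>-init naming pattern is taken from the test fixtures in this diff, and the helper itself is illustrative.]

package main

import "fmt"

// Default library versions per language, matching languageVersions after this change.
var defaultVersions = map[string]string{
	"java":   "v1",
	"dotnet": "v3",
	"python": "v2",
	"ruby":   "v2",
	"js":     "v5",
}

// libInitImage builds an init-container image reference such as
// "registry/dd-lib-dotnet-init:v3". Returns false for unsupported languages.
func libInitImage(registry, lang string) (string, bool) {
	version, ok := defaultVersions[lang]
	if !ok {
		return "", false
	}
	return fmt.Sprintf("%s/dd-lib-%s-init:%s", registry, lang, version), true
}

func main() {
	if img, ok := libInitImage("registry", "dotnet"); ok {
		fmt.Println(img) // registry/dd-lib-dotnet-init:v3
	}
}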
@@ -166,7 +166,7 @@ type configKeyEnvVarMutator struct { } func (c configKeyEnvVarMutator) mutatePod(pod *corev1.Pod) error { - if config.Datadog().IsSet(c.configKey) { + if pkgconfigsetup.Datadog().IsSet(c.configKey) { _ = common.InjectEnv(pod, corev1.EnvVar{Name: c.envKey, Value: c.getVal(c.configKey)}) } diff --git a/pkg/clusteragent/admission/mutate/autoscaling/autoscaling.go b/pkg/clusteragent/admission/mutate/autoscaling/autoscaling.go index 9f4bfa158dfad..feefdef468844 100644 --- a/pkg/clusteragent/admission/mutate/autoscaling/autoscaling.go +++ b/pkg/clusteragent/admission/mutate/autoscaling/autoscaling.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/cluster-agent/admission" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/workload" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" admiv1 "k8s.io/api/admissionregistration/v1" corev1 "k8s.io/api/core/v1" @@ -39,7 +39,7 @@ type Webhook struct { func NewWebhook(patcher workload.PodPatcher) *Webhook { return &Webhook{ name: webhookName, - isEnabled: config.Datadog().GetBool("autoscaling.workload.enabled"), + isEnabled: pkgconfigsetup.Datadog().GetBool("autoscaling.workload.enabled"), endpoint: webhookEndpoint, resources: []string{"pods"}, operations: []admiv1.OperationType{admiv1.Create}, diff --git a/pkg/clusteragent/admission/mutate/common/common.go b/pkg/clusteragent/admission/mutate/common/common.go index ba60a05f472b8..7ce0baa5bea05 100644 --- a/pkg/clusteragent/admission/mutate/common/common.go +++ b/pkg/clusteragent/admission/mutate/common/common.go @@ -20,7 +20,7 @@ import ( "k8s.io/client-go/dynamic" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/metrics" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -181,11 +181,11 @@ func containsVolumeMount(volumeMounts []corev1.VolumeMount, element corev1.Volum // config option, and falls back to the default container registry if no // webhook-specific container registry is set. 
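[Editor's note: ContainerRegistry, whose body follows, prefers a webhook-specific registry key and falls back to the shared admission_controller.container_registry. A sketch of that lookup order, with a plain map standing in for the agent's config object; the map-based helper is illustrative only, not the real config API.]

package main

import "fmt"

// registryFor returns the webhook-specific registry when one is configured,
// otherwise the shared default, mirroring common.ContainerRegistry.
func registryFor(cfg map[string]string, specificKey string) string {
	if v, ok := cfg[specificKey]; ok {
		return v
	}
	return cfg["admission_controller.container_registry"]
}

func main() {
	cfg := map[string]string{
		"admission_controller.container_registry": "gcr.io/datadoghq",
	}
	fmt.Println(registryFor(cfg, "admission_controller.auto_instrumentation.container_registry"))
	// -> gcr.io/datadoghq (no webhook-specific override set)
}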
func ContainerRegistry(specificConfigOpt string) string { - if config.Datadog().IsSet(specificConfigOpt) { - return config.Datadog().GetString(specificConfigOpt) + if pkgconfigsetup.Datadog().IsSet(specificConfigOpt) { + return pkgconfigsetup.Datadog().GetString(specificConfigOpt) } - return config.Datadog().GetString("admission_controller.container_registry") + return pkgconfigsetup.Datadog().GetString("admission_controller.container_registry") } // MarkVolumeAsSafeToEvictForAutoscaler adds the Kubernetes cluster-autoscaler diff --git a/pkg/clusteragent/admission/mutate/common/label_selectors.go b/pkg/clusteragent/admission/mutate/common/label_selectors.go index d305427ab782b..2e69d9dcecdb2 100644 --- a/pkg/clusteragent/admission/mutate/common/label_selectors.go +++ b/pkg/clusteragent/admission/mutate/common/label_selectors.go @@ -11,16 +11,16 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // DefaultLabelSelectors returns the mutating webhooks object selector based on the configuration func DefaultLabelSelectors(useNamespaceSelector bool) (namespaceSelector, objectSelector *metav1.LabelSelector) { var labelSelector metav1.LabelSelector - if config.Datadog().GetBool("admission_controller.mutate_unlabelled") || - config.Datadog().GetBool("apm_config.instrumentation.enabled") || - len(config.Datadog().GetStringSlice("apm_config.instrumentation.enabled_namespaces")) > 0 { + if pkgconfigsetup.Datadog().GetBool("admission_controller.mutate_unlabelled") || + pkgconfigsetup.Datadog().GetBool("apm_config.instrumentation.enabled") || + len(pkgconfigsetup.Datadog().GetStringSlice("apm_config.instrumentation.enabled_namespaces")) > 0 { // Accept all, ignore pods if they're explicitly filtered-out labelSelector = metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ @@ -40,7 +40,7 @@ func DefaultLabelSelectors(useNamespaceSelector bool) (namespaceSelector, object } } - if config.Datadog().GetBool("admission_controller.add_aks_selectors") { + if pkgconfigsetup.Datadog().GetBool("admission_controller.add_aks_selectors") { return aksSelectors(useNamespaceSelector, labelSelector) } diff --git a/pkg/clusteragent/admission/mutate/common/ns_injection_filter.go b/pkg/clusteragent/admission/mutate/common/ns_injection_filter.go index 068e3b4ebaf31..94e05458c551c 100644 --- a/pkg/clusteragent/admission/mutate/common/ns_injection_filter.go +++ b/pkg/clusteragent/admission/mutate/common/ns_injection_filter.go @@ -11,7 +11,7 @@ import ( corev1 "k8s.io/api/core/v1" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -36,7 +36,7 @@ func (f InjectionFilter) ShouldMutatePod(pod *corev1.Pod) bool { return true } - return config.Datadog().GetBool("admission_controller.mutate_unlabelled") + return pkgconfigsetup.Datadog().GetBool("admission_controller.mutate_unlabelled") } type podMutationLabelFlag int diff --git a/pkg/clusteragent/admission/mutate/config/config.go b/pkg/clusteragent/admission/mutate/config/config.go index 0d43bdfb3c229..874d61eab8f04 100644 --- a/pkg/clusteragent/admission/mutate/config/config.go +++ b/pkg/clusteragent/admission/mutate/config/config.go @@ -24,7 +24,7 @@ import ( admCommon 
"github.com/DataDog/datadog-agent/pkg/clusteragent/admission/common" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/metrics" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" apiCommon "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -50,9 +50,15 @@ const ( socket = "socket" service = "service" - // DatadogVolumeName is the name of the volume used to mount the socket + // DatadogVolumeName is the name of the volume used to mount the sockets when the volume source is a directory DatadogVolumeName = "datadog" + // TraceAgentSocketVolumeName is the name of the volume used to mount the trace agent socket + TraceAgentSocketVolumeName = "datadog-trace-agent" + + // DogstatsdSocketVolumeName is the name of the volume used to mount the dogstatsd socket + DogstatsdSocketVolumeName = "datadog-dogstatsd" + webhookName = "agent_config" ) @@ -69,7 +75,7 @@ var ( agentHostServiceEnvVar = corev1.EnvVar{ Name: agentHostEnvVarName, - Value: config.Datadog().GetString("admission_controller.inject_config.local_service_name") + "." + apiCommon.GetMyNamespace() + ".svc.cluster.local", + Value: pkgconfigsetup.Datadog().GetString("admission_controller.inject_config.local_service_name") + "." + apiCommon.GetMyNamespace() + ".svc.cluster.local", } defaultDdEntityIDEnvVar = corev1.EnvVar{ @@ -84,12 +90,12 @@ var ( traceURLSocketEnvVar = corev1.EnvVar{ Name: traceURLEnvVarName, - Value: config.Datadog().GetString("admission_controller.inject_config.trace_agent_socket"), + Value: pkgconfigsetup.Datadog().GetString("admission_controller.inject_config.trace_agent_socket"), } dogstatsdURLSocketEnvVar = corev1.EnvVar{ Name: dogstatsdURLEnvVarName, - Value: config.Datadog().GetString("admission_controller.inject_config.dogstatsd_socket"), + Value: pkgconfigsetup.Datadog().GetString("admission_controller.inject_config.dogstatsd_socket"), } ) @@ -109,11 +115,11 @@ type Webhook struct { func NewWebhook(wmeta workloadmeta.Component, injectionFilter common.InjectionFilter) *Webhook { return &Webhook{ name: webhookName, - isEnabled: config.Datadog().GetBool("admission_controller.inject_config.enabled"), - endpoint: config.Datadog().GetString("admission_controller.inject_config.endpoint"), + isEnabled: pkgconfigsetup.Datadog().GetBool("admission_controller.inject_config.enabled"), + endpoint: pkgconfigsetup.Datadog().GetString("admission_controller.inject_config.endpoint"), resources: []string{"pods"}, operations: []admiv1.OperationType{admiv1.Create}, - mode: config.Datadog().GetString("admission_controller.inject_config.mode"), + mode: pkgconfigsetup.Datadog().GetString("admission_controller.inject_config.mode"), wmeta: wmeta, injectionFilter: injectionFilter, } @@ -184,15 +190,10 @@ func (w *Webhook) inject(pod *corev1.Pod, _ string, _ dynamic.Interface) (bool, case service: injectedConfig = common.InjectEnv(pod, agentHostServiceEnvVar) case socket: - volume, volumeMount := buildVolume(DatadogVolumeName, config.Datadog().GetString("admission_controller.inject_config.socket_path"), true) - injectedVol := common.InjectVolume(pod, volume, volumeMount) - if injectedVol { - common.MarkVolumeAsSafeToEvictForAutoscaler(pod, DatadogVolumeName) - } - + injectedVolumes := injectSocketVolumes(pod) injectedEnv := common.InjectEnv(pod, traceURLSocketEnvVar) injectedEnv = common.InjectEnv(pod, 
dogstatsdURLSocketEnvVar) || injectedEnv - injectedConfig = injectedEnv || injectedVol + injectedConfig = injectedVolumes || injectedEnv default: log.Errorf("invalid injection mode %q", w.mode) return false, errors.New(metrics.InvalidInput) @@ -249,14 +250,13 @@ func injectExternalDataEnvVar(pod *corev1.Pod) (injected bool) { return } -func buildVolume(volumeName, path string, readOnly bool) (corev1.Volume, corev1.VolumeMount) { - pathType := corev1.HostPathDirectoryOrCreate +func buildVolume(volumeName, path string, hostpathType corev1.HostPathType, readOnly bool) (corev1.Volume, corev1.VolumeMount) { volume := corev1.Volume{ Name: volumeName, VolumeSource: corev1.VolumeSource{ HostPath: &corev1.HostPathVolumeSource{ Path: path, - Type: &pathType, + Type: &hostpathType, }, }, } @@ -269,3 +269,52 @@ func buildVolume(volumeName, path string, readOnly bool) (corev1.Volume, corev1. return volume, volumeMount } + +// injectSocketVolumes injects the volumes for the dogstatsd and trace agent +// sockets. +// +// The type of the volume injected can be either a directory or a socket +// depending on the configuration. They offer different trade-offs. Using a +// socket ensures no lost traces or dogstatsd metrics but can cause the pod to +// wait if the agent has issues that prevent it from creating the sockets. +// +// This function returns true if at least one volume was injected. +func injectSocketVolumes(pod *corev1.Pod) bool { + var injectedVolNames []string + + if pkgconfigsetup.Datadog().GetBool("admission_controller.inject_config.type_socket_volumes") { + volumes := map[string]string{ + DogstatsdSocketVolumeName: strings.TrimPrefix( + pkgconfigsetup.Datadog().GetString("admission_controller.inject_config.dogstatsd_socket"), "unix://", + ), + TraceAgentSocketVolumeName: strings.TrimPrefix( + pkgconfigsetup.Datadog().GetString("admission_controller.inject_config.trace_agent_socket"), "unix://", + ), + } + + for volumeName, volumePath := range volumes { + volume, volumeMount := buildVolume(volumeName, volumePath, corev1.HostPathSocket, true) + injectedVol := common.InjectVolume(pod, volume, volumeMount) + if injectedVol { + injectedVolNames = append(injectedVolNames, volumeName) + } + } + } else { + volume, volumeMount := buildVolume( + DatadogVolumeName, + pkgconfigsetup.Datadog().GetString("admission_controller.inject_config.socket_path"), + corev1.HostPathDirectoryOrCreate, + true, + ) + injectedVol := common.InjectVolume(pod, volume, volumeMount) + if injectedVol { + injectedVolNames = append(injectedVolNames, DatadogVolumeName) + } + } + + for _, volName := range injectedVolNames { + common.MarkVolumeAsSafeToEvictForAutoscaler(pod, volName) + } + + return len(injectedVolNames) > 0 +} diff --git a/pkg/clusteragent/admission/mutate/config/config_test.go b/pkg/clusteragent/admission/mutate/config/config_test.go index c8dd5437edf85..6321412ac1d0b 100644 --- a/pkg/clusteragent/admission/mutate/config/config_test.go +++ b/pkg/clusteragent/admission/mutate/config/config_test.go @@ -10,6 +10,7 @@ package config import ( "encoding/json" "os" + "strings" "testing" "github.com/stretchr/testify/assert" @@ -307,6 +308,7 @@ func TestInjectSocket(t *testing.T) { injected, err := webhook.inject(pod, "", nil) assert.Nil(t, err) assert.True(t, injected) + assert.Contains(t, pod.Spec.Containers[0].Env, mutatecommon.FakeEnvWithValue("DD_TRACE_AGENT_URL", "unix:///var/run/datadog/apm.socket")) assert.Contains(t, pod.Spec.Containers[0].Env, mutatecommon.FakeEnvWithValue("DD_DOGSTATSD_URL", 
"unix:///var/run/datadog/dsd.socket")) assert.Equal(t, pod.Spec.Containers[0].VolumeMounts[0].MountPath, "/var/run/datadog") @@ -318,6 +320,67 @@ func TestInjectSocket(t *testing.T) { assert.Equal(t, "datadog", pod.Annotations[mutatecommon.K8sAutoscalerSafeToEvictVolumesAnnotation]) } +func TestInjectSocket_VolumeTypeSocket(t *testing.T) { + pod := mutatecommon.FakePodWithContainer("foo-pod", corev1.Container{}) + pod = mutatecommon.WithLabels(pod, map[string]string{"admission.datadoghq.com/enabled": "true", "admission.datadoghq.com/config.mode": "socket"}) + wmeta := fxutil.Test[workloadmeta.Component]( + t, + core.MockBundle(), + workloadmetafxmock.MockModule(workloadmeta.NewParams()), + fx.Replace(config.MockParams{ + Overrides: map[string]interface{}{"admission_controller.inject_config.type_socket_volumes": true}, + }), + ) + webhook := NewWebhook(wmeta, autoinstrumentation.GetInjectionFilter()) + injected, err := webhook.inject(pod, "", nil) + assert.Nil(t, err) + assert.True(t, injected) + + assert.Contains(t, pod.Spec.Containers[0].Env, mutatecommon.FakeEnvWithValue("DD_TRACE_AGENT_URL", "unix:///var/run/datadog/apm.socket")) + assert.Contains(t, pod.Spec.Containers[0].Env, mutatecommon.FakeEnvWithValue("DD_DOGSTATSD_URL", "unix:///var/run/datadog/dsd.socket")) + + expectedVolumeMounts := []corev1.VolumeMount{ + { + Name: "datadog-dogstatsd", + MountPath: "/var/run/datadog/dsd.socket", + ReadOnly: true, + }, + { + Name: "datadog-trace-agent", + MountPath: "/var/run/datadog/apm.socket", + ReadOnly: true, + }, + } + assert.ElementsMatch(t, pod.Spec.Containers[0].VolumeMounts, expectedVolumeMounts) + + expectedVolumes := []corev1.Volume{ + { + Name: "datadog-dogstatsd", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/var/run/datadog/dsd.socket", + Type: pointer.Ptr(corev1.HostPathSocket), + }, + }, + }, + { + Name: "datadog-trace-agent", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/var/run/datadog/apm.socket", + Type: pointer.Ptr(corev1.HostPathSocket), + }, + }, + }, + } + assert.ElementsMatch(t, pod.Spec.Volumes, expectedVolumes) + + safeToEvictVolumes := strings.Split(pod.Annotations[mutatecommon.K8sAutoscalerSafeToEvictVolumesAnnotation], ",") + assert.Len(t, safeToEvictVolumes, 2) + assert.Contains(t, safeToEvictVolumes, "datadog-dogstatsd") + assert.Contains(t, safeToEvictVolumes, "datadog-trace-agent") +} + func TestInjectSocketWithConflictingVolumeAndInitContainer(t *testing.T) { pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -339,7 +402,11 @@ func TestInjectSocketWithConflictingVolumeAndInitContainer(t *testing.T) { VolumeMounts: []corev1.VolumeMount{ { Name: "foo", - MountPath: "/var/run/datadog", + MountPath: "/var/run/datadog/dsd.socket", + }, + { + Name: "bar", + MountPath: "/var/run/datadog/apm.socket", }, }, }, diff --git a/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go b/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go index 86c6c3ecf30c2..91cec20c7c9d2 100644 --- a/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go +++ b/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go @@ -36,7 +36,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/cwsinstrumentation/k8scp" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/cwsinstrumentation/k8sexec" - 
"github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/resolvers/usersessions" "github.com/DataDog/datadog-agent/pkg/util/containers" apiserverUtils "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" @@ -96,9 +96,9 @@ type WebhookForPods struct { func newWebhookForPods(admissionFunc admission.WebhookFunc) *WebhookForPods { return &WebhookForPods{ name: webhookForPodsName, - isEnabled: config.Datadog().GetBool("admission_controller.cws_instrumentation.enabled") && - len(config.Datadog().GetString("admission_controller.cws_instrumentation.image_name")) > 0, - endpoint: config.Datadog().GetString("admission_controller.cws_instrumentation.pod_endpoint"), + isEnabled: pkgconfigsetup.Datadog().GetBool("admission_controller.cws_instrumentation.enabled") && + len(pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.image_name")) > 0, + endpoint: pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.pod_endpoint"), resources: []string{"pods"}, operations: []admiv1.OperationType{admiv1.Create}, admissionFunc: admissionFunc, @@ -156,9 +156,9 @@ type WebhookForCommands struct { func newWebhookForCommands(admissionFunc admission.WebhookFunc) *WebhookForCommands { return &WebhookForCommands{ name: webhookForCommandsName, - isEnabled: config.Datadog().GetBool("admission_controller.cws_instrumentation.enabled") && - len(config.Datadog().GetString("admission_controller.cws_instrumentation.image_name")) > 0, - endpoint: config.Datadog().GetString("admission_controller.cws_instrumentation.command_endpoint"), + isEnabled: pkgconfigsetup.Datadog().GetBool("admission_controller.cws_instrumentation.enabled") && + len(pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.image_name")) > 0, + endpoint: pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.command_endpoint"), resources: []string{"pods/exec"}, operations: []admiv1.OperationType{admiv1.Connect}, admissionFunc: admissionFunc, @@ -205,7 +205,7 @@ func (w *WebhookForCommands) MutateFunc() admission.WebhookFunc { func parseCWSInitContainerResources() (*corev1.ResourceRequirements, error) { var resources = &corev1.ResourceRequirements{Limits: corev1.ResourceList{}, Requests: corev1.ResourceList{}} - if cpu := config.Datadog().GetString("admission_controller.cws_instrumentation.init_resources.cpu"); len(cpu) > 0 { + if cpu := pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.init_resources.cpu"); len(cpu) > 0 { quantity, err := resource.ParseQuantity(cpu) if err != nil { return nil, err @@ -214,7 +214,7 @@ func parseCWSInitContainerResources() (*corev1.ResourceRequirements, error) { resources.Limits[corev1.ResourceCPU] = quantity } - if mem := config.Datadog().GetString("admission_controller.cws_instrumentation.init_resources.memory"); len(mem) > 0 { + if mem := pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.init_resources.memory"); len(mem) > 0 { quantity, err := resource.ParseQuantity(mem) if err != nil { return nil, err @@ -291,16 +291,16 @@ func NewCWSInstrumentation(wmeta workloadmeta.Component) (*CWSInstrumentation, e // Parse filters ci.filter, err = containers.NewFilter( containers.GlobalFilter, - config.Datadog().GetStringSlice("admission_controller.cws_instrumentation.include"), - config.Datadog().GetStringSlice("admission_controller.cws_instrumentation.exclude"), + 
pkgconfigsetup.Datadog().GetStringSlice("admission_controller.cws_instrumentation.include"), + pkgconfigsetup.Datadog().GetStringSlice("admission_controller.cws_instrumentation.exclude"), ) if err != nil { return nil, fmt.Errorf("couldn't initialize filter: %w", err) } // Parse init container image - cwsInjectorImageName := config.Datadog().GetString("admission_controller.cws_instrumentation.image_name") - cwsInjectorImageTag := config.Datadog().GetString("admission_controller.cws_instrumentation.image_tag") + cwsInjectorImageName := pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.image_name") + cwsInjectorImageTag := pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.image_tag") cwsInjectorContainerRegistry := common.ContainerRegistry("admission_controller.cws_instrumentation.container_registry") @@ -317,16 +317,16 @@ func NewCWSInstrumentation(wmeta workloadmeta.Component) (*CWSInstrumentation, e } // parse mode - ci.mode, err = ParseInstrumentationMode(config.Datadog().GetString("admission_controller.cws_instrumentation.mode")) + ci.mode, err = ParseInstrumentationMode(pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.mode")) if err != nil { return nil, fmt.Errorf("can't initiatilize CWS Instrumentation: %v", err) } - ci.mountVolumeForRemoteCopy = config.Datadog().GetBool("admission_controller.cws_instrumentation.remote_copy.mount_volume") - ci.directoryForRemoteCopy = config.Datadog().GetString("admission_controller.cws_instrumentation.remote_copy.directory") + ci.mountVolumeForRemoteCopy = pkgconfigsetup.Datadog().GetBool("admission_controller.cws_instrumentation.remote_copy.mount_volume") + ci.directoryForRemoteCopy = pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.remote_copy.directory") if ci.mode == RemoteCopy { // build the cluster agent service account - serviceAccountName := config.Datadog().GetString("cluster_agent.service_account_name") + serviceAccountName := pkgconfigsetup.Datadog().GetString("cluster_agent.service_account_name") if len(serviceAccountName) == 0 { return nil, fmt.Errorf("can't initialize CWS Instrumentation in %s mode without providing a service account name in config (cluster_agent.service_account_name)", RemoteCopy) } @@ -764,8 +764,8 @@ func injectCWSInitContainer(pod *corev1.Pod, resources *corev1.ResourceRequireme func labelSelectors(useNamespaceSelector bool) (namespaceSelector, objectSelector *metav1.LabelSelector) { var labelSelector metav1.LabelSelector - if config.Datadog().GetBool("admission_controller.cws_instrumentation.mutate_unlabelled") || - config.Datadog().GetBool("admission_controller.mutate_unlabelled") { + if pkgconfigsetup.Datadog().GetBool("admission_controller.cws_instrumentation.mutate_unlabelled") || + pkgconfigsetup.Datadog().GetBool("admission_controller.mutate_unlabelled") { // Accept all, ignore pods if they're explicitly filtered-out labelSelector = metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ diff --git a/pkg/clusteragent/admission/mutate/tagsfromlabels/tags.go b/pkg/clusteragent/admission/mutate/tagsfromlabels/tags.go index b48138d2d2ed0..2fd2279eaeaa8 100644 --- a/pkg/clusteragent/admission/mutate/tagsfromlabels/tags.go +++ b/pkg/clusteragent/admission/mutate/tagsfromlabels/tags.go @@ -26,7 +26,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/metrics" 
"github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -56,8 +56,8 @@ type Webhook struct { func NewWebhook(wmeta workloadmeta.Component, injectionFilter common.InjectionFilter) *Webhook { return &Webhook{ name: webhookName, - isEnabled: config.Datadog().GetBool("admission_controller.inject_tags.enabled"), - endpoint: config.Datadog().GetString("admission_controller.inject_tags.endpoint"), + isEnabled: pkgconfigsetup.Datadog().GetBool("admission_controller.inject_tags.enabled"), + endpoint: pkgconfigsetup.Datadog().GetString("admission_controller.inject_tags.endpoint"), resources: []string{"pods"}, operations: []admiv1.OperationType{admiv1.Create}, ownerCacheTTL: ownerCacheTTL(), @@ -270,9 +270,9 @@ func (w *Webhook) getAndCacheOwner(info *ownerInfo, ns string, dc dynamic.Interf } func ownerCacheTTL() time.Duration { - if config.Datadog().IsSet("admission_controller.pod_owners_cache_validity") { // old option. Kept for backwards compatibility - return config.Datadog().GetDuration("admission_controller.pod_owners_cache_validity") * time.Minute + if pkgconfigsetup.Datadog().IsSet("admission_controller.pod_owners_cache_validity") { // old option. Kept for backwards compatibility + return pkgconfigsetup.Datadog().GetDuration("admission_controller.pod_owners_cache_validity") * time.Minute } - return config.Datadog().GetDuration("admission_controller.inject_tags.pod_owners_cache_validity") * time.Minute + return pkgconfigsetup.Datadog().GetDuration("admission_controller.inject_tags.pod_owners_cache_validity") * time.Minute } diff --git a/pkg/clusteragent/admission/patch/provider.go b/pkg/clusteragent/admission/patch/provider.go index 10ff1593b61fb..2b1469533c38a 100644 --- a/pkg/clusteragent/admission/patch/provider.go +++ b/pkg/clusteragent/admission/patch/provider.go @@ -11,8 +11,8 @@ import ( "errors" "github.com/DataDog/datadog-agent/pkg/clusteragent/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" rcclient "github.com/DataDog/datadog-agent/pkg/config/remote/client" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) type patchProvider interface { @@ -21,12 +21,12 @@ type patchProvider interface { } func newPatchProvider(rcClient *rcclient.Client, isLeaderNotif <-chan struct{}, telemetryCollector telemetry.TelemetryCollector, clusterName string) (patchProvider, error) { - if config.IsRemoteConfigEnabled(config.Datadog()) { + if pkgconfigsetup.IsRemoteConfigEnabled(pkgconfigsetup.Datadog()) { return newRemoteConfigProvider(rcClient, isLeaderNotif, telemetryCollector, clusterName) } - if config.Datadog().GetBool("admission_controller.auto_instrumentation.patcher.fallback_to_file_provider") { + if pkgconfigsetup.Datadog().GetBool("admission_controller.auto_instrumentation.patcher.fallback_to_file_provider") { // Use the file config provider for e2e testing only (it replaces RC as a source of configs) - file := config.Datadog().GetString("admission_controller.auto_instrumentation.patcher.file_provider_path") + file := pkgconfigsetup.Datadog().GetString("admission_controller.auto_instrumentation.patcher.file_provider_path") return newfileProvider(file, isLeaderNotif, clusterName), nil } return nil, errors.New("remote config is disabled") diff --git 
a/pkg/clusteragent/admission/start.go b/pkg/clusteragent/admission/start.go index 596ed8caa4d10..6da39581700fc 100644 --- a/pkg/clusteragent/admission/start.go +++ b/pkg/clusteragent/admission/start.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/controllers/secret" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/controllers/webhook" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/workload" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -37,18 +37,18 @@ type ControllerContext struct { // StartControllers starts the secret and webhook controllers func StartControllers(ctx ControllerContext, wmeta workloadmeta.Component, pa workload.PodPatcher) ([]webhook.MutatingWebhook, error) { - if !config.Datadog().GetBool("admission_controller.enabled") { + if !pkgconfigsetup.Datadog().GetBool("admission_controller.enabled") { log.Info("Admission controller is disabled") return nil, nil } certConfig := secret.NewCertConfig( - config.Datadog().GetDuration("admission_controller.certificate.expiration_threshold")*time.Hour, - config.Datadog().GetDuration("admission_controller.certificate.validity_bound")*time.Hour) + pkgconfigsetup.Datadog().GetDuration("admission_controller.certificate.expiration_threshold")*time.Hour, + pkgconfigsetup.Datadog().GetDuration("admission_controller.certificate.validity_bound")*time.Hour) secretConfig := secret.NewConfig( common.GetResourcesNamespace(), - config.Datadog().GetString("admission_controller.certificate.secret_name"), - config.Datadog().GetString("admission_controller.service_name"), + pkgconfigsetup.Datadog().GetString("admission_controller.certificate.secret_name"), + pkgconfigsetup.Datadog().GetString("admission_controller.service_name"), certConfig) secretController := secret.NewController( ctx.Client, diff --git a/pkg/clusteragent/admission/status.go b/pkg/clusteragent/admission/status.go index 3492640df251b..315f30ea26190 100644 --- a/pkg/clusteragent/admission/status.go +++ b/pkg/clusteragent/admission/status.go @@ -16,7 +16,7 @@ import ( "strconv" "github.com/DataDog/datadog-agent/comp/core/status" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/certificate" @@ -29,14 +29,14 @@ import ( // GetStatus returns status info for the secret and webhook controllers. 
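[Editor's note: GetStatus, whose body follows, assembles a plain map for the status page and short-circuits with a "Disabled" entry when the admission controller is turned off. A self-contained sketch of that shape, with the enabled flag passed in instead of read from the agent config; the webhook and secret names in main are illustrative values.]

package main

import "fmt"

// admissionStatus mirrors the structure GetStatus returns: either a single
// "Disabled" entry, or the webhook and secret identifiers.
func admissionStatus(enabled bool, ns, webhookName, secretName string) map[string]interface{} {
	status := make(map[string]interface{})
	if !enabled {
		status["Disabled"] = "The admission controller is not enabled on the Cluster Agent"
		return status
	}
	status["WebhookName"] = webhookName
	status["SecretName"] = fmt.Sprintf("%s/%s", ns, secretName)
	return status
}

func main() {
	fmt.Println(admissionStatus(true, "datadog", "datadog-webhook", "webhook-certificate"))
}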
func GetStatus(apiCl kubernetes.Interface) map[string]interface{} { status := make(map[string]interface{}) - if !config.Datadog().GetBool("admission_controller.enabled") { + if !pkgconfigsetup.Datadog().GetBool("admission_controller.enabled") { status["Disabled"] = "The admission controller is not enabled on the Cluster Agent" return status } ns := common.GetResourcesNamespace() - webhookName := config.Datadog().GetString("admission_controller.webhook_name") - secretName := config.Datadog().GetString("admission_controller.certificate.secret_name") + webhookName := pkgconfigsetup.Datadog().GetString("admission_controller.webhook_name") + secretName := pkgconfigsetup.Datadog().GetString("admission_controller.certificate.secret_name") status["WebhookName"] = webhookName status["SecretName"] = fmt.Sprintf("%s/%s", ns, secretName) diff --git a/pkg/clusteragent/admission/util.go b/pkg/clusteragent/admission/util.go index 4f385ba151b64..90f0f893add60 100644 --- a/pkg/clusteragent/admission/util.go +++ b/pkg/clusteragent/admission/util.go @@ -12,7 +12,7 @@ import ( "strconv" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -24,7 +24,7 @@ import ( // Returns true if `namespace_selector_fallback` is enabled and k8s version is between 1.10 and 1.14 (included). // Kubernetes 1.15+ supports object selectors. func useNamespaceSelector(discoveryCl discovery.DiscoveryInterface) (bool, error) { - if !config.Datadog().GetBool("admission_controller.namespace_selector_fallback") { + if !pkgconfigsetup.Datadog().GetBool("admission_controller.namespace_selector_fallback") { return false, nil } diff --git a/pkg/clusteragent/api/leader_forwarder.go b/pkg/clusteragent/api/leader_forwarder.go index d77d0bb22e901..a5989f62d112b 100644 --- a/pkg/clusteragent/api/leader_forwarder.go +++ b/pkg/clusteragent/api/leader_forwarder.go @@ -20,7 +20,7 @@ import ( "github.com/cihub/seelog" - "github.com/DataDog/datadog-agent/pkg/config" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) const ( @@ -43,7 +43,7 @@ type LeaderForwarder struct { // NewLeaderForwarder initializes a new LeaderForwarder instance and is used for test purposes func NewLeaderForwarder(apiPort, maxConnections int) *LeaderForwarder { // Use a stack depth of 4 on top of the default one to get a relevant filename in the stdlib - logWriter, _ := config.NewLogWriter(4, seelog.DebugLvl) + logWriter, _ := pkglogsetup.NewLogWriter(4, seelog.DebugLvl) return &LeaderForwarder{ apiPort: strconv.Itoa(apiPort), transport: &http.Transport{ diff --git a/pkg/clusteragent/api/leader_handler.go b/pkg/clusteragent/api/leader_handler.go index 04f744bc6d763..b87e5197d7240 100644 --- a/pkg/clusteragent/api/leader_handler.go +++ b/pkg/clusteragent/api/leader_handler.go @@ -12,7 +12,7 @@ package api import ( "net/http" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -61,7 +61,7 @@ func WithLeaderProxyHandler(handlerName string, preHandler RequestPreHandler, le lph := LeaderProxyHandler{ handlerName: handlerName, leaderForwarder: GetGlobalLeaderForwarder(), - leaderElectionEnabled: 
config.Datadog().GetBool("leader_election"), + leaderElectionEnabled: pkgconfigsetup.Datadog().GetBool("leader_election"), preHandler: preHandler, leaderHandler: leaderHandler, } diff --git a/pkg/clusteragent/autoscaling/custommetrics/provider.go b/pkg/clusteragent/autoscaling/custommetrics/provider.go index 84041c0269f26..5d50c7960e00d 100644 --- a/pkg/clusteragent/autoscaling/custommetrics/provider.go +++ b/pkg/clusteragent/autoscaling/custommetrics/provider.go @@ -23,7 +23,7 @@ import ( "sigs.k8s.io/custom-metrics-apiserver/pkg/provider" "sigs.k8s.io/custom-metrics-apiserver/pkg/provider/defaults" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -53,7 +53,7 @@ type datadogProvider struct { // NewDatadogProvider creates a Custom Metrics and External Metrics Provider. func NewDatadogProvider(ctx context.Context, client dynamic.Interface, mapper apimeta.RESTMapper, store Store) provider.ExternalMetricsProvider { - maxAge := config.Datadog().GetInt64("external_metrics_provider.local_copy_refresh_rate") + maxAge := pkgconfigsetup.Datadog().GetInt64("external_metrics_provider.local_copy_refresh_rate") d := &datadogProvider{ client: client, mapper: mapper, diff --git a/pkg/clusteragent/autoscaling/custommetrics/status.go b/pkg/clusteragent/autoscaling/custommetrics/status.go index a31e98e525169..72da47185b143 100644 --- a/pkg/clusteragent/autoscaling/custommetrics/status.go +++ b/pkg/clusteragent/autoscaling/custommetrics/status.go @@ -12,19 +12,19 @@ import ( "k8s.io/client-go/kubernetes" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" ) // GetStatus returns status info for the Custom Metrics Server. 
func GetStatus(apiCl kubernetes.Interface) map[string]interface{} { status := make(map[string]interface{}) - if !config.Datadog().GetBool("external_metrics_provider.enabled") { + if !pkgconfigsetup.Datadog().GetBool("external_metrics_provider.enabled") { status["Disabled"] = "The external metrics provider is not enabled on the Cluster Agent" return status } - if config.Datadog().GetBool("external_metrics_provider.use_datadogmetric_crd") { + if pkgconfigsetup.Datadog().GetBool("external_metrics_provider.use_datadogmetric_crd") { status["NoStatus"] = "External metrics provider uses DatadogMetric - Check status directly from Kubernetes with: `kubectl get datadogmetric`" return status } diff --git a/pkg/clusteragent/autoscaling/custommetrics/store_configmap.go b/pkg/clusteragent/autoscaling/custommetrics/store_configmap.go index b88453e1a08c6..9dfbe88ae11d8 100644 --- a/pkg/clusteragent/autoscaling/custommetrics/store_configmap.go +++ b/pkg/clusteragent/autoscaling/custommetrics/store_configmap.go @@ -14,7 +14,7 @@ import ( "sync" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" le "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -40,7 +40,7 @@ type configMapStore struct { // GetConfigmapName returns the name of the ConfigMap used to store the state of the Custom Metrics Provider func GetConfigmapName() string { - return config.Datadog().GetString("hpa_configmap_name") + return pkgconfigsetup.Datadog().GetString("hpa_configmap_name") } // NewConfigMapStore returns a new store backed by a configmap. The configmap will be created diff --git a/pkg/clusteragent/autoscaling/externalmetrics/provider.go b/pkg/clusteragent/autoscaling/externalmetrics/provider.go index 06f8a7d960dd6..bf07e1251413e 100644 --- a/pkg/clusteragent/autoscaling/externalmetrics/provider.go +++ b/pkg/clusteragent/autoscaling/externalmetrics/provider.go @@ -22,7 +22,7 @@ import ( "sigs.k8s.io/custom-metrics-apiserver/pkg/provider/defaults" datadogclient "github.com/DataDog/datadog-agent/comp/autoscaling/datadogclient/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection" @@ -59,17 +59,17 @@ func NewDatadogMetricProvider(ctx context.Context, apiCl *apiserver.APIClient, d return nil, fmt.Errorf("Unable to create DatadogMetricProvider as LeaderElection failed with: %v", err) } - aggregator := config.Datadog().GetString("external_metrics.aggregator") - rollup := config.Datadog().GetInt("external_metrics_provider.rollup") + aggregator := pkgconfigsetup.Datadog().GetString("external_metrics.aggregator") + rollup := pkgconfigsetup.Datadog().GetInt("external_metrics_provider.rollup") setQueryConfigValues(aggregator, rollup) - refreshPeriod := config.Datadog().GetInt64("external_metrics_provider.refresh_period") - metricsMaxAge = int64(math.Max(config.Datadog().GetFloat64("external_metrics_provider.max_age"), float64(3*rollup))) - metricsQueryValidityPeriod = int64(config.Datadog().GetFloat64("external_metrics_provider.query_validity_period")) - splitBatchBackoffOnErrors := config.Datadog().GetBool("external_metrics_provider.split_batches_with_backoff") + refreshPeriod := 
pkgconfigsetup.Datadog().GetInt64("external_metrics_provider.refresh_period") + metricsMaxAge = int64(math.Max(pkgconfigsetup.Datadog().GetFloat64("external_metrics_provider.max_age"), float64(3*rollup))) + metricsQueryValidityPeriod = int64(pkgconfigsetup.Datadog().GetFloat64("external_metrics_provider.query_validity_period")) + splitBatchBackoffOnErrors := pkgconfigsetup.Datadog().GetBool("external_metrics_provider.split_batches_with_backoff") autogenNamespace := common.GetResourcesNamespace() - autogenEnabled := config.Datadog().GetBool("external_metrics_provider.enable_datadogmetric_autogen") - wpaEnabled := config.Datadog().GetBool("external_metrics_provider.wpa_controller") + autogenEnabled := pkgconfigsetup.Datadog().GetBool("external_metrics_provider.enable_datadogmetric_autogen") + wpaEnabled := pkgconfigsetup.Datadog().GetBool("external_metrics_provider.wpa_controller") provider := &datadogMetricProvider{ apiCl: apiCl, diff --git a/pkg/clusteragent/autoscaling/workload/controller.go b/pkg/clusteragent/autoscaling/workload/controller.go index 27ae6f77070aa..d6d8b9dd22b41 100644 --- a/pkg/clusteragent/autoscaling/workload/controller.go +++ b/pkg/clusteragent/autoscaling/workload/controller.go @@ -14,6 +14,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic/dynamicinformer" scaleclient "k8s.io/client-go/scale" @@ -241,13 +242,13 @@ func (c *Controller) syncPodAutoscaler(ctx context.Context, key, ns, name string // and compare it with the one in the PodAutoscaler. If they differ, we should update the PodAutoscaler // otherwise store the Generation if podAutoscalerInternal.Generation() != podAutoscaler.Generation { - localHash, err := autoscaling.ObjectHash(podAutoscalerInternal.Spec) + localHash, err := autoscaling.ObjectHash(podAutoscalerInternal.Spec()) if err != nil { c.store.Unlock(key) return autoscaling.Requeue, fmt.Errorf("Failed to compute Spec hash for PodAutoscaler: %s/%s, err: %v", ns, name, err) } - remoteHash, err := autoscaling.ObjectHash(podAutoscaler.Spec) + remoteHash, err := autoscaling.ObjectHash(&podAutoscaler.Spec) if err != nil { c.store.Unlock(key) return autoscaling.Requeue, fmt.Errorf("Failed to compute Spec hash for PodAutoscaler: %s/%s, err: %v", ns, name, err) @@ -282,12 +283,31 @@ func (c *Controller) syncPodAutoscaler(ctx context.Context, key, ns, name string return autoscaling.NoRequeue, c.updateAutoscalerStatusAndUnlock(ctx, key, ns, name, validationErr, podAutoscalerInternal, podAutoscaler) } + // Get autoscaler target + targetGVK, targetErr := podAutoscalerInternal.TargetGVK() + if targetErr != nil { + podAutoscalerInternal.SetError(targetErr) + return autoscaling.NoRequeue, c.updateAutoscalerStatusAndUnlock(ctx, key, ns, name, targetErr, podAutoscalerInternal, podAutoscaler) + } + target := NamespacedPodOwner{ + Namespace: podAutoscalerInternal.Namespace(), + Name: podAutoscalerInternal.Spec().TargetRef.Name, + Kind: targetGVK.Kind, + } + // Now that everything is synced, we can perform the actual processing - result, scalingErr := c.handleScaling(ctx, podAutoscaler, &podAutoscalerInternal) + result, scalingErr := c.handleScaling(ctx, podAutoscaler, &podAutoscalerInternal, targetGVK, target) + + // Update current replicas + pods := c.podWatcher.GetPodsForOwner(target) + currentReplicas := len(pods) + podAutoscalerInternal.SetCurrentReplicas(int32(currentReplicas)) + + 
// Update status based on latest state return result, c.updateAutoscalerStatusAndUnlock(ctx, key, ns, name, scalingErr, podAutoscalerInternal, podAutoscaler) } -func (c *Controller) handleScaling(ctx context.Context, podAutoscaler *datadoghq.DatadogPodAutoscaler, podAutoscalerInternal *model.PodAutoscalerInternal) (autoscaling.ProcessResult, error) { +func (c *Controller) handleScaling(ctx context.Context, podAutoscaler *datadoghq.DatadogPodAutoscaler, podAutoscalerInternal *model.PodAutoscalerInternal, targetGVK schema.GroupVersionKind, target NamespacedPodOwner) (autoscaling.ProcessResult, error) { // TODO: While horizontal scaling is in progress we should not start vertical scaling // While vertical scaling is in progress we should only allow horizontal upscale horizontalRes, err := c.horizontalController.sync(ctx, podAutoscaler, podAutoscalerInternal) @@ -295,7 +315,7 @@ func (c *Controller) handleScaling(ctx context.Context, podAutoscaler *datadoghq return horizontalRes, err } - verticalRes, err := c.verticalController.sync(ctx, podAutoscaler, podAutoscalerInternal) + verticalRes, err := c.verticalController.sync(ctx, podAutoscaler, podAutoscalerInternal, targetGVK, target) if err != nil { return verticalRes, err } diff --git a/pkg/clusteragent/autoscaling/workload/controller_horizontal.go b/pkg/clusteragent/autoscaling/workload/controller_horizontal.go index 9c3c043d02c21..3a9c0ac675033 100644 --- a/pkg/clusteragent/autoscaling/workload/controller_horizontal.go +++ b/pkg/clusteragent/autoscaling/workload/controller_horizontal.go @@ -79,9 +79,6 @@ func (hr *horizontalController) sync(ctx context.Context, podAutoscaler *datadog return autoscaling.Requeue, err } - // Update current replicas - autoscalerInternal.SetCurrentReplicas(scale.Status.Replicas) - return hr.performScaling(ctx, podAutoscaler, autoscalerInternal, gr, scale) } diff --git a/pkg/clusteragent/autoscaling/workload/controller_horizontal_test.go b/pkg/clusteragent/autoscaling/workload/controller_horizontal_test.go index 009811fbf4a39..f306f680bbc34 100644 --- a/pkg/clusteragent/autoscaling/workload/controller_horizontal_test.go +++ b/pkg/clusteragent/autoscaling/workload/controller_horizontal_test.go @@ -111,7 +111,6 @@ func (f *horizontalControllerFixture) testScalingDecision(args horizontalScaling f.scaler.AssertNumberOfCalls(f.t, "get", 1) f.scaler.AssertNumberOfCalls(f.t, "update", expectedUpdateCalls) - args.fakePai.CurrentReplicas = pointer.Ptr[int32](args.statusReplicas) if scaleActionExpected && args.scaleError == nil { // Update fakePai with the new expected state action := &datadoghq.DatadogPodAutoscalerHorizontalAction{ @@ -142,8 +141,9 @@ func TestHorizontalControllerSyncPrerequisites(t *testing.T) { autoscalerName := "test" fakePai := &model.FakePodAutoscalerInternal{ - Namespace: autoscalerNamespace, - Name: autoscalerName, + Namespace: autoscalerNamespace, + Name: autoscalerName, + CurrentReplicas: pointer.Ptr[int32](5), } // Test case: no Spec, no action taken @@ -165,7 +165,7 @@ func TestHorizontalControllerSyncPrerequisites(t *testing.T) { model.AssertPodAutoscalersEqual(t, fakePai.Build(), autoscaler) // Test case: Correct Spec and GVK, but no scaling values - // Should only update replica count + // Should do nothing expectedGVK := schema.GroupVersionKind{ Group: "apps", Version: "v1", @@ -304,7 +304,8 @@ func TestHorizontalControllerSyncScaleDecisions(t *testing.T) { Replicas: 5, }, }, - TargetGVK: expectedGVK, + TargetGVK: expectedGVK, + CurrentReplicas: pointer.Ptr[int32](5), } // Step: same number 
of replicas, no action taken, only updating status diff --git a/pkg/clusteragent/autoscaling/workload/controller_vertical.go b/pkg/clusteragent/autoscaling/workload/controller_vertical.go index 5ea9d8d4f300a..2a940490d875a 100644 --- a/pkg/clusteragent/autoscaling/workload/controller_vertical.go +++ b/pkg/clusteragent/autoscaling/workload/controller_vertical.go @@ -56,7 +56,7 @@ func newVerticalController(clock clock.Clock, eventRecorder record.EventRecorder return res } -func (u *verticalController) sync(ctx context.Context, podAutoscaler *datadoghq.DatadogPodAutoscaler, autoscalerInternal *model.PodAutoscalerInternal) (autoscaling.ProcessResult, error) { +func (u *verticalController) sync(ctx context.Context, podAutoscaler *datadoghq.DatadogPodAutoscaler, autoscalerInternal *model.PodAutoscalerInternal, targetGVK schema.GroupVersionKind, target NamespacedPodOwner) (autoscaling.ProcessResult, error) { scalingValues := autoscalerInternal.ScalingValues() // Check if the autoscaler has a vertical scaling recommendation @@ -67,18 +67,6 @@ func (u *verticalController) sync(ctx context.Context, podAutoscaler *datadoghq. } recomendationID := scalingValues.Vertical.ResourcesHash - targetGVK, err := autoscalerInternal.TargetGVK() - if err != nil { - autoscalerInternal.SetError(err) - return autoscaling.NoRequeue, err - } - - // Get the pod owner from the workload - target := NamespacedPodOwner{ - Namespace: autoscalerInternal.Namespace(), - Name: autoscalerInternal.Spec().TargetRef.Name, - Kind: targetGVK.Kind, - } // Get the pods for the pod owner pods := u.podWatcher.GetPodsForOwner(target) diff --git a/pkg/clusteragent/clusterchecks/dispatcher_isolate_test.go b/pkg/clusteragent/clusterchecks/dispatcher_isolate_test.go index ece79222c8c3d..609d4264fa968 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_isolate_test.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_isolate_test.go @@ -10,19 +10,20 @@ package clusterchecks import ( "testing" + "github.com/stretchr/testify/assert" + "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" - "github.com/stretchr/testify/assert" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestIsolateCheckSuccessful(t *testing.T) { testDispatcher := newDispatcher() testDispatcher.store.nodes["A"] = newNodeStore("A", "") - testDispatcher.store.nodes["A"].workers = config.DefaultNumWorkers + testDispatcher.store.nodes["A"].workers = pkgconfigsetup.DefaultNumWorkers testDispatcher.store.nodes["B"] = newNodeStore("B", "") - testDispatcher.store.nodes["B"].workers = config.DefaultNumWorkers + testDispatcher.store.nodes["B"].workers = pkgconfigsetup.DefaultNumWorkers testDispatcher.store.nodes["A"].clcRunnerStats = map[string]types.CLCRunnerStats{ "checkA0": { @@ -100,9 +101,9 @@ func TestIsolateCheckSuccessful(t *testing.T) { func TestIsolateNonExistentCheckFails(t *testing.T) { testDispatcher := newDispatcher() testDispatcher.store.nodes["A"] = newNodeStore("A", "") - testDispatcher.store.nodes["A"].workers = config.DefaultNumWorkers + testDispatcher.store.nodes["A"].workers = pkgconfigsetup.DefaultNumWorkers testDispatcher.store.nodes["B"] = newNodeStore("B", "") - testDispatcher.store.nodes["B"].workers = config.DefaultNumWorkers + testDispatcher.store.nodes["B"].workers = pkgconfigsetup.DefaultNumWorkers 
testDispatcher.store.nodes["A"].clcRunnerStats = map[string]types.CLCRunnerStats{ "checkA0": { @@ -178,7 +179,7 @@ func TestIsolateNonExistentCheckFails(t *testing.T) { func TestIsolateCheckOnlyOneRunnerFails(t *testing.T) { testDispatcher := newDispatcher() testDispatcher.store.nodes["A"] = newNodeStore("A", "") - testDispatcher.store.nodes["A"].workers = config.DefaultNumWorkers + testDispatcher.store.nodes["A"].workers = pkgconfigsetup.DefaultNumWorkers testDispatcher.store.nodes["A"].clcRunnerStats = map[string]types.CLCRunnerStats{ "checkA0": { diff --git a/pkg/clusteragent/clusterchecks/dispatcher_main.go b/pkg/clusteragent/clusterchecks/dispatcher_main.go index 63048637bb61a..398bf21d2c5c5 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_main.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_main.go @@ -13,7 +13,7 @@ import ( "time" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/clusteragent" "github.com/DataDog/datadog-agent/pkg/util/containers" @@ -38,10 +38,10 @@ func newDispatcher() *dispatcher { d := &dispatcher{ store: newClusterStore(), } - d.nodeExpirationSeconds = config.Datadog().GetInt64("cluster_checks.node_expiration_timeout") - d.extraTags = config.Datadog().GetStringSlice("cluster_checks.extra_tags") + d.nodeExpirationSeconds = pkgconfigsetup.Datadog().GetInt64("cluster_checks.node_expiration_timeout") + d.extraTags = pkgconfigsetup.Datadog().GetStringSlice("cluster_checks.extra_tags") - excludedChecks := config.Datadog().GetStringSlice("cluster_checks.exclude_checks") + excludedChecks := pkgconfigsetup.Datadog().GetStringSlice("cluster_checks.exclude_checks") // This option will almost always be empty if len(excludedChecks) > 0 { d.excludedChecks = make(map[string]struct{}, len(excludedChecks)) @@ -50,7 +50,7 @@ func newDispatcher() *dispatcher { } } - excludedChecksFromDispatching := config.Datadog().GetStringSlice("cluster_checks.exclude_checks_from_dispatching") + excludedChecksFromDispatching := pkgconfigsetup.Datadog().GetStringSlice("cluster_checks.exclude_checks_from_dispatching") // This option will almost always be empty if len(excludedChecksFromDispatching) > 0 { d.excludedChecksFromDispatching = make(map[string]struct{}, len(excludedChecksFromDispatching)) @@ -59,20 +59,20 @@ func newDispatcher() *dispatcher { } } - d.rebalancingPeriod = config.Datadog().GetDuration("cluster_checks.rebalance_period") + d.rebalancingPeriod = pkgconfigsetup.Datadog().GetDuration("cluster_checks.rebalance_period") hname, _ := hostname.Get(context.TODO()) clusterTagValue := clustername.GetClusterName(context.TODO(), hname) - clusterTagName := config.Datadog().GetString("cluster_checks.cluster_tag_name") + clusterTagName := pkgconfigsetup.Datadog().GetString("cluster_checks.cluster_tag_name") if clusterTagValue != "" { - if clusterTagName != "" && !config.Datadog().GetBool("disable_cluster_name_tag_key") { + if clusterTagName != "" && !pkgconfigsetup.Datadog().GetBool("disable_cluster_name_tag_key") { d.extraTags = append(d.extraTags, fmt.Sprintf("%s:%s", clusterTagName, clusterTagValue)) log.Info("Adding both tags cluster_name and kube_cluster_name. 
You can use 'disable_cluster_name_tag_key' in the Agent config to keep the kube_cluster_name tag only") } d.extraTags = append(d.extraTags, fmt.Sprintf("kube_cluster_name:%s", clusterTagValue)) } - d.advancedDispatching = config.Datadog().GetBool("cluster_checks.advanced_dispatching_enabled") + d.advancedDispatching = pkgconfigsetup.Datadog().GetBool("cluster_checks.advanced_dispatching_enabled") if !d.advancedDispatching { return d } diff --git a/pkg/clusteragent/clusterchecks/dispatcher_nodes.go b/pkg/clusteragent/clusterchecks/dispatcher_nodes.go index 6b03a704d5953..2e7dd891ab912 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_nodes.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_nodes.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" le "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -198,12 +198,12 @@ func (d *dispatcher) updateRunnersStats() { ip := node.clientIP node.RUnlock() - if config.Datadog().GetBool("cluster_checks.rebalance_with_utilization") { + if pkgconfigsetup.Datadog().GetBool("cluster_checks.rebalance_with_utilization") { workers, err := d.clcRunnersClient.GetRunnerWorkers(ip) if err != nil { // This can happen in old versions of the runners that do not expose this information. log.Debugf("Cannot get number of workers for node %s with IP %s. Assuming default. Error: %v", name, node.clientIP, err) - node.workers = config.DefaultNumWorkers + node.workers = pkgconfigsetup.DefaultNumWorkers } else { node.workers = workers.Count } diff --git a/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go b/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go index 981640d079eb0..c7da3d88d96eb 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" "github.com/DataDog/datadog-agent/pkg/collector/check/defaults" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" le "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -172,7 +172,7 @@ func (d *dispatcher) moveCheck(src, dest, checkID string) error { } func (d *dispatcher) rebalance(force bool) []types.RebalanceResponse { - if config.Datadog().GetBool("cluster_checks.rebalance_with_utilization") { + if pkgconfigsetup.Datadog().GetBool("cluster_checks.rebalance_with_utilization") { return d.rebalanceUsingUtilization(force) } @@ -339,7 +339,7 @@ func (d *dispatcher) rebalanceUsingUtilization(force bool) []types.RebalanceResp // checks. 
currentUtilizationStdDev := currentChecksDistribution.utilizationStdDev() proposedUtilizationStdDev := proposedDistribution.utilizationStdDev() - minPercImprovement := config.Datadog().GetInt("cluster_checks.rebalance_min_percentage_improvement") + minPercImprovement := pkgconfigsetup.Datadog().GetInt("cluster_checks.rebalance_min_percentage_improvement") if force || rebalanceIsWorthIt(currentChecksDistribution, proposedDistribution, minPercImprovement) { diff --git a/pkg/clusteragent/clusterchecks/dispatcher_rebalance_test.go b/pkg/clusteragent/clusterchecks/dispatcher_rebalance_test.go index 9550adc53b2f1..09747e5c82abf 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_rebalance_test.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_rebalance_test.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestRebalance(t *testing.T) { @@ -1522,9 +1522,9 @@ func TestRebalanceUsingUtilization(t *testing.T) { testDispatcher.store.active = true testDispatcher.store.nodes["node1"] = newNodeStore("node1", "") - testDispatcher.store.nodes["node1"].workers = config.DefaultNumWorkers + testDispatcher.store.nodes["node1"].workers = pkgconfigsetup.DefaultNumWorkers testDispatcher.store.nodes["node2"] = newNodeStore("node2", "") - testDispatcher.store.nodes["node2"].workers = config.DefaultNumWorkers + testDispatcher.store.nodes["node2"].workers = pkgconfigsetup.DefaultNumWorkers testDispatcher.store.nodes["node1"].clcRunnerStats = map[string]types.CLCRunnerStats{ // This is the check with the highest utilization. 
The code will try to diff --git a/pkg/clusteragent/clusterchecks/dispatcher_test.go b/pkg/clusteragent/clusterchecks/dispatcher_test.go index 12ffdb069206d..f2c31184ff8b7 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_test.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_test.go @@ -17,7 +17,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" @@ -469,7 +468,7 @@ func TestReset(t *testing.T) { } func TestPatchConfiguration(t *testing.T) { - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) checkConfig := integration.Config{ Name: "test", @@ -507,7 +506,7 @@ func TestPatchConfiguration(t *testing.T) { } func TestPatchEndpointsConfiguration(t *testing.T) { - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) checkConfig := integration.Config{ Name: "test", @@ -540,7 +539,7 @@ func TestPatchEndpointsConfiguration(t *testing.T) { } func TestExtraTags(t *testing.T) { - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) for _, tc := range []struct { extraTagsConfig []string diff --git a/pkg/clusteragent/clusterchecks/handler.go b/pkg/clusteragent/clusterchecks/handler.go index ce2a51c252fe7..fff45ba64e3d1 100644 --- a/pkg/clusteragent/clusterchecks/handler.go +++ b/pkg/clusteragent/clusterchecks/handler.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/scheduler" "github.com/DataDog/datadog-agent/pkg/clusteragent/api" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -66,12 +66,12 @@ func NewHandler(ac pluggableAutoConfig) (*Handler, error) { h := &Handler{ autoconfig: ac, leaderStatusFreq: 5 * time.Second, - warmupDuration: config.Datadog().GetDuration("cluster_checks.warmup_duration") * time.Second, + warmupDuration: pkgconfigsetup.Datadog().GetDuration("cluster_checks.warmup_duration") * time.Second, leadershipChan: make(chan state, 1), dispatcher: newDispatcher(), } - if config.Datadog().GetBool("leader_election") { + if pkgconfigsetup.Datadog().GetBool("leader_election") { h.leaderForwarder = api.GetGlobalLeaderForwarder() callback, err := getLeaderIPCallback() if err != nil { diff --git a/pkg/clusteragent/clusterchecks/status.go b/pkg/clusteragent/clusterchecks/status.go index 858569baaf865..7877b2e8f932a 100644 --- a/pkg/clusteragent/clusterchecks/status.go +++ b/pkg/clusteragent/clusterchecks/status.go @@ -12,7 +12,7 @@ import ( "io" "github.com/DataDog/datadog-agent/comp/core/status" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // Provider provides the functionality to populate the status output @@ -49,7 +49,7 @@ func (Provider) HTML(_ bool, _ io.Writer) error { } func populateStatus(stats map[string]interface{}) { - if config.Datadog().GetBool("cluster_checks.enabled") { + if pkgconfigsetup.Datadog().GetBool("cluster_checks.enabled") { if cchecks, err := GetStats(); err == nil { 
stats["clusterchecks"] = cchecks } diff --git a/pkg/clusteragent/languagedetection/patcher.go b/pkg/clusteragent/languagedetection/patcher.go index 3d8379a5a71a5..e1171876c65c3 100644 --- a/pkg/clusteragent/languagedetection/patcher.go +++ b/pkg/clusteragent/languagedetection/patcher.go @@ -26,7 +26,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" langUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" @@ -66,8 +66,8 @@ func newLanguagePatcher(ctx context.Context, store workloadmeta.Component, logge logger: logger, queue: workqueue.NewRateLimitingQueueWithConfig( workqueue.NewItemExponentialFailureRateLimiter( - config.Datadog().GetDuration("cluster_agent.language_detection.patcher.base_backoff"), - config.Datadog().GetDuration("cluster_agent.language_detection.patcher.max_backoff"), + pkgconfigsetup.Datadog().GetDuration("cluster_agent.language_detection.patcher.base_backoff"), + pkgconfigsetup.Datadog().GetDuration("cluster_agent.language_detection.patcher.max_backoff"), ), workqueue.RateLimitingQueueConfig{ Name: subsystem, diff --git a/pkg/clusteragent/orchestrator/status.go b/pkg/clusteragent/orchestrator/status.go index 27d56a681a88f..26ea3dab2d428 100644 --- a/pkg/clusteragent/orchestrator/status.go +++ b/pkg/clusteragent/orchestrator/status.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/status" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/orchestrator" orchcfg "github.com/DataDog/datadog-agent/pkg/orchestrator/config" pkgorchestratormodel "github.com/DataDog/datadog-agent/pkg/orchestrator/model" @@ -41,12 +41,12 @@ type stats struct { // GetStatus returns status info for the orchestrator explorer. func GetStatus(ctx context.Context, apiCl kubernetes.Interface) map[string]interface{} { status := make(map[string]interface{}) - if !config.Datadog().GetBool("orchestrator_explorer.enabled") { + if !pkgconfigsetup.Datadog().GetBool("orchestrator_explorer.enabled") { status["Disabled"] = "The orchestrator explorer is not enabled on the Cluster Agent" return status } - if !config.Datadog().GetBool("leader_election") { + if !pkgconfigsetup.Datadog().GetBool("leader_election") { status["Disabled"] = "Leader election is not enabled on the Cluster Agent. The orchestrator explorer needs leader election for resource collection." return status } @@ -81,7 +81,7 @@ func GetStatus(ctx context.Context, apiCl kubernetes.Interface) map[string]inter setSkippedResourcesInformationDCAMode(status) // rewriting DCA Mode in case we are running in cluster check mode. 
- if orchestrator.KubernetesResourceCache.ItemCount() == 0 && config.Datadog().GetBool("cluster_checks.enabled") { + if orchestrator.KubernetesResourceCache.ItemCount() == 0 && pkgconfigsetup.Datadog().GetBool("cluster_checks.enabled") { // we need to check first whether we have dispatched checks to CLC stats, err := clusterchecks.GetStats() if err != nil { @@ -102,11 +102,11 @@ func GetStatus(ctx context.Context, apiCl kubernetes.Interface) map[string]inter } // get options - if config.Datadog().GetBool("orchestrator_explorer.container_scrubbing.enabled") { + if pkgconfigsetup.Datadog().GetBool("orchestrator_explorer.container_scrubbing.enabled") { status["ContainerScrubbing"] = "Container scrubbing: enabled" } - if config.Datadog().GetBool("orchestrator_explorer.manifest_collection.enabled") { + if pkgconfigsetup.Datadog().GetBool("orchestrator_explorer.manifest_collection.enabled") { status["ManifestCollection"] = "Manifest collection: enabled" } @@ -252,7 +252,7 @@ func (Provider) HTML(_ bool, _ io.Writer) error { func populateStatus(stats map[string]interface{}) { apiCl, apiErr := apiserver.GetAPIClient() - if config.Datadog().GetBool("orchestrator_explorer.enabled") { + if pkgconfigsetup.Datadog().GetBool("orchestrator_explorer.enabled") { if apiErr != nil { stats["orchestrator"] = map[string]string{"Error": apiErr.Error()} } else { diff --git a/pkg/clusteragent/telemetry/collector.go b/pkg/clusteragent/telemetry/collector.go index 801d8b8e21fd2..9e772bd44b9b8 100644 --- a/pkg/clusteragent/telemetry/collector.go +++ b/pkg/clusteragent/telemetry/collector.go @@ -16,7 +16,7 @@ import ( "strconv" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" httputils "github.com/DataDog/datadog-agent/pkg/util/http" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -101,7 +101,7 @@ func httpClientFactory(timeout time.Duration) func() *http.Client { return &http.Client{ Timeout: timeout, // reusing core agent HTTP transport to benefit from proxy settings. 
- Transport: httputils.CreateHTTPTransport(config.Datadog()), + Transport: httputils.CreateHTTPTransport(pkgconfigsetup.Datadog()), } } } @@ -112,7 +112,7 @@ func httpClientFactory(timeout time.Duration) func() *http.Client { func NewCollector(rcClientId string, kubernetesClusterId string) TelemetryCollector { return &telemetryCollector{ client: httputils.NewResetClient(httpClientResetInterval, httpClientFactory(httpClientTimeout)), - host: utils.GetMainEndpoint(config.Datadog(), mainEndpointPrefix, mainEndpointUrlKey), + host: utils.GetMainEndpoint(pkgconfigsetup.Datadog(), mainEndpointPrefix, mainEndpointUrlKey), userAgent: "Datadog Cluster Agent", rcClientId: rcClientId, kubernetesClusterId: kubernetesClusterId, @@ -154,12 +154,12 @@ func (tc *telemetryCollector) sendRemoteConfigEvent(eventName string, event ApmR log.Errorf("Error while trying to create a web request for a remote config event: %v", err) return } - if !config.Datadog().IsSet("api_key") { + if !pkgconfigsetup.Datadog().IsSet("api_key") { return } req.Header.Add("Content-Type", "application/json") req.Header.Add("User-Agent", tc.userAgent) - req.Header.Add("DD-API-KEY", config.Datadog().GetString("api_key")) + req.Header.Add("DD-API-KEY", pkgconfigsetup.Datadog().GetString("api_key")) req.Header.Add("Content-Length", bodyLen) resp, err := tc.client.Do(req) diff --git a/pkg/clusteragent/telemetry/collector_test.go b/pkg/clusteragent/telemetry/collector_test.go index 9835c3a1caaca..5e9a085d8bc47 100644 --- a/pkg/clusteragent/telemetry/collector_test.go +++ b/pkg/clusteragent/telemetry/collector_test.go @@ -14,7 +14,7 @@ import ( "net/http/httptest" "testing" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/stretchr/testify/assert" ) @@ -57,7 +57,7 @@ func TestTelemetryPath(t *testing.T) { collector := NewCollector(testRcClientId, testKubernetesClusterId) collector.SetTestHost(server.URL) - config.Datadog().SetWithoutSource("api_key", "dummy") + pkgconfigsetup.Datadog().SetWithoutSource("api_key", "dummy") var reqCount int var path string diff --git a/pkg/collector/check/jmx.go b/pkg/collector/check/jmx.go index 9cf15a9aad5ac..ab88b4eb98288 100644 --- a/pkg/collector/check/jmx.go +++ b/pkg/collector/check/jmx.go @@ -9,7 +9,7 @@ import ( yaml "gopkg.in/yaml.v2" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - agentconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // IsJMXConfig checks if a certain YAML config contains at least one instance of a JMX config @@ -25,7 +25,7 @@ func IsJMXConfig(config integration.Config) bool { // IsJMXInstance checks if a certain YAML instance is a JMX config func IsJMXInstance(name string, instance integration.Data, initConfig integration.Data) bool { - if _, ok := agentconfig.StandardJMXIntegrations[name]; ok { + if _, ok := pkgconfigsetup.StandardJMXIntegrations[name]; ok { return true } diff --git a/pkg/collector/check/stats/stats.go b/pkg/collector/check/stats/stats.go index d7a78363271d2..5270dd79c0051 100644 --- a/pkg/collector/check/stats/stats.go +++ b/pkg/collector/check/stats/stats.go @@ -13,7 +13,7 @@ import ( "github.com/mitchellh/mapstructure" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" 
"github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -145,14 +145,14 @@ func NewStats(c StatsCheck) *Stats { CheckVersion: c.Version(), CheckConfigSource: c.ConfigSource(), Interval: c.Interval(), - telemetry: utils.IsCheckTelemetryEnabled(c.String(), config.Datadog()), + telemetry: utils.IsCheckTelemetryEnabled(c.String(), pkgconfigsetup.Datadog()), EventPlatformEvents: make(map[string]int64), TotalEventPlatformEvents: make(map[string]int64), } // We are interested in a check's run state values even when they are 0 so we // initialize them here explicitly - if stats.telemetry && utils.IsTelemetryEnabled(config.Datadog()) { + if stats.telemetry && utils.IsTelemetryEnabled(pkgconfigsetup.Datadog()) { tlmRuns.InitializeToZero(stats.CheckName, runCheckFailureTag) tlmRuns.InitializeToZero(stats.CheckName, runCheckSuccessTag) } diff --git a/pkg/collector/corechecks/checkbase.go b/pkg/collector/corechecks/checkbase.go index 68d410e5d7bbc..39a6a5b8df53e 100644 --- a/pkg/collector/corechecks/checkbase.go +++ b/pkg/collector/corechecks/checkbase.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check/defaults" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" "github.com/DataDog/datadog-agent/pkg/collector/check/stats" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -64,7 +64,7 @@ func NewCheckBaseWithInterval(name string, defaultInterval time.Duration) CheckB checkName: name, checkID: checkid.ID(name), checkInterval: defaultInterval, - telemetry: utils.IsCheckTelemetryEnabled(name, config.Datadog()), + telemetry: utils.IsCheckTelemetryEnabled(name, pkgconfigsetup.Datadog()), } } diff --git a/pkg/collector/corechecks/cluster/helm/helm.go b/pkg/collector/corechecks/cluster/helm/helm.go index 11e42d1c4b0e2..d9392b85873dd 100644 --- a/pkg/collector/corechecks/cluster/helm/helm.go +++ b/pkg/collector/corechecks/cluster/helm/helm.go @@ -27,7 +27,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -95,7 +95,7 @@ func newCheck() check.Check { CheckBase: core.NewCheckBase(CheckName), instance: &checkConfig{}, store: newReleasesStore(), - runLeaderElection: !config.IsCLCRunner(), + runLeaderElection: !pkgconfigsetup.IsCLCRunner(pkgconfigsetup.Datadog()), eventsManager: &eventsManager{}, } } @@ -456,7 +456,7 @@ func isManagedByHelm(object metav1.Object) bool { } func isLeader() (bool, error) { - if !config.Datadog().GetBool("leader_election") { + if !pkgconfigsetup.Datadog().GetBool("leader_election") { return false, errors.New("leader election not enabled. 
The check will not run") } diff --git a/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go b/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go index ef141ec4f61ef..367bedd234317 100644 --- a/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go +++ b/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go @@ -24,7 +24,8 @@ import ( core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/ksm/customresources" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" kubestatemetrics "github.com/DataDog/datadog-agent/pkg/kubestatemetrics/builder" ksmstore "github.com/DataDog/datadog-agent/pkg/kubestatemetrics/store" @@ -190,7 +191,7 @@ type KSMConfig struct { // KSMCheck wraps the config and the metric stores needed to run the check type KSMCheck struct { core.CheckBase - agentConfig ddconfig.Config + agentConfig model.Config instance *KSMConfig allStores [][]cache.Store telemetry *telemetryCache @@ -240,7 +241,7 @@ func init() { // Configure prepares the configuration of the KSM check instance func (k *KSMCheck) Configure(senderManager sender.SenderManager, integrationConfigDigest uint64, config, initConfig integration.Data, source string) error { k.BuildID(integrationConfigDigest, config, initConfig) - k.agentConfig = ddconfig.Datadog() + k.agentConfig = pkgconfigsetup.Datadog() err := k.CommonConfigure(senderManager, initConfig, config, source) if err != nil { @@ -344,7 +345,7 @@ func (k *KSMCheck) Configure(senderManager sender.SenderManager, integrationConf resyncPeriod := k.instance.ResyncPeriod if resyncPeriod == 0 { - resyncPeriod = ddconfig.Datadog().GetInt("kubernetes_informers_resync_period") + resyncPeriod = pkgconfigsetup.Datadog().GetInt("kubernetes_informers_resync_period") } builder.WithResync(time.Duration(resyncPeriod) * time.Second) @@ -526,7 +527,7 @@ func (k *KSMCheck) Run() error { // we also do a safety check for dedicated runners to avoid trying the leader election if (!k.isCLCRunner || !k.instance.LeaderSkip) && !podsFromKubeletInNodeAgent { // Only run if Leader Election is enabled. - if !ddconfig.Datadog().GetBool("leader_election") { + if !pkgconfigsetup.Datadog().GetBool("leader_election") { return log.Error("Leader Election not enabled. 
The cluster-agent will not run the kube-state-metrics core check.") } @@ -952,8 +953,8 @@ func newKSMCheck(base core.CheckBase, instance *KSMConfig) *KSMCheck { CheckBase: base, instance: instance, telemetry: newTelemetryCache(), - isCLCRunner: ddconfig.IsCLCRunner(), - isRunningOnNodeAgent: flavor.GetFlavor() != flavor.ClusterAgent && !ddconfig.IsCLCRunner(), + isCLCRunner: pkgconfigsetup.IsCLCRunner(pkgconfigsetup.Datadog()), + isRunningOnNodeAgent: flavor.GetFlavor() != flavor.ClusterAgent && !pkgconfigsetup.IsCLCRunner(pkgconfigsetup.Datadog()), metricNamesMapper: defaultMetricNamesMapper(), metricAggregators: defaultMetricAggregators(), metricTransformers: defaultMetricTransformers(), diff --git a/pkg/collector/corechecks/cluster/ksm/kubernetes_state_test.go b/pkg/collector/corechecks/cluster/ksm/kubernetes_state_test.go index 7d04037fa97bf..001d20167f084 100644 --- a/pkg/collector/corechecks/cluster/ksm/kubernetes_state_test.go +++ b/pkg/collector/corechecks/cluster/ksm/kubernetes_state_test.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ksmstore "github.com/DataDog/datadog-agent/pkg/kubestatemetrics/store" ) @@ -1644,7 +1644,7 @@ func TestKSMCheckInitTags(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - conf := config.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + conf := model.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) conf.SetWithoutSource("tags", tt.tagsInConfig) k := &KSMCheck{ diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go index 7054a232e7282..dfc6d051d3dea 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/kubetags" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util" - ddConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -413,7 +413,7 @@ func init() { } func getEventSource(controllerName string, sourceComponent string) string { - if !ddConfig.Datadog().GetBool("kubernetes_events_source_detection.enabled") { + if !pkgconfigsetup.Datadog().GetBool("kubernetes_events_source_detection.enabled") { return kubernetesEventSource } diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go index ee6a132f7f9c5..5bb3ec879312b 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go @@ -25,7 +25,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster" - ddConfig 
"github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/telemetry" @@ -124,7 +124,7 @@ type KubeASCheck struct { func (c *KubeASConfig) parse(data []byte) error { // default values - c.CollectEvent = ddConfig.Datadog().GetBool("collect_kubernetes_events") + c.CollectEvent = pkgconfigsetup.Datadog().GetBool("collect_kubernetes_events") c.CollectOShiftQuotas = true c.ResyncPeriodEvents = defaultResyncPeriodInSecond c.UseComponentStatus = true @@ -174,7 +174,7 @@ func (k *KubeASCheck) Configure(senderManager sender.SenderManager, _ uint64, co clusterName := clustername.GetRFC1123CompliantClusterName(context.TODO(), hostnameDetected) // Automatically add events based on activated Datadog products - if ddConfig.Datadog().GetBool("autoscaling.workload.enabled") { + if pkgconfigsetup.Datadog().GetBool("autoscaling.workload.enabled") { k.instance.CollectedEventTypes = append(k.instance.CollectedEventTypes, collectedEventType{ Source: "datadog-workload-autoscaler", }) @@ -204,7 +204,7 @@ func (k *KubeASCheck) Run() error { } defer sender.Commit() - if ddConfig.Datadog().GetBool("cluster_agent.enabled") { + if pkgconfigsetup.Datadog().GetBool("cluster_agent.enabled") { log.Debug("Cluster agent is enabled. Not running Kubernetes API Server check or collecting Kubernetes Events.") return nil } @@ -212,7 +212,7 @@ func (k *KubeASCheck) Run() error { // The Cluster Agent will passed in the `skip_leader_election` bool. if !k.instance.LeaderSkip { // Only run if Leader Election is enabled. - if !ddConfig.Datadog().GetBool("leader_election") { + if !pkgconfigsetup.Datadog().GetBool("leader_election") { return log.Error("Leader Election not enabled. 
Not running Kubernetes API Server check or collecting Kubernetes Events.") } leader, errLeader := cluster.RunLeaderElection() diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_openshift_test.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_openshift_test.go index b20bca4044e61..d410ec83c7724 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_openshift_test.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_openshift_test.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestReportClusterQuotas(t *testing.T) { @@ -29,9 +29,9 @@ func TestReportClusterQuotas(t *testing.T) { json.Unmarshal(raw, &list) require.Len(t, list.Items, 1) - prevClusterName := config.Datadog().GetString("cluster_name") - config.Datadog().SetWithoutSource("cluster_name", "test-cluster-name") - defer config.Datadog().SetWithoutSource("cluster_name", prevClusterName) + prevClusterName := pkgconfigsetup.Datadog().GetString("cluster_name") + pkgconfigsetup.Datadog().SetWithoutSource("cluster_name", "test-cluster-name") + defer pkgconfigsetup.Datadog().SetWithoutSource("cluster_name", prevClusterName) instanceCfg := []byte("") initCfg := []byte("") diff --git a/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go b/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go index 9e583aaeabade..e528141618f03 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go +++ b/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go @@ -20,7 +20,7 @@ import ( core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/orchestrator" orchcfg "github.com/DataDog/datadog-agent/pkg/orchestrator/config" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" @@ -89,7 +89,7 @@ func newOrchestratorCheck(base core.CheckBase, instance *OrchestratorInstance) * instance: instance, stopCh: make(chan struct{}), groupID: atomic.NewInt32(rand.Int31()), - isCLCRunner: config.IsCLCRunner(), + isCLCRunner: pkgconfigsetup.IsCLCRunner(pkgconfigsetup.Datadog()), } } @@ -176,7 +176,7 @@ func (o *OrchestratorCheck) Run() error { // we also do a safety check for dedicated runners to avoid trying the leader election if !o.isCLCRunner || !o.instance.LeaderSkip { // Only run if Leader Election is enabled. - if !config.Datadog().GetBool("leader_election") { + if !pkgconfigsetup.Datadog().GetBool("leader_election") { return log.Errorc("Leader Election not enabled. The cluster-agent will not run the check.", orchestrator.ExtraLogContext...) 
} diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/ust.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/ust.go index 43315ca94520e..fe0974a49fac2 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/ust.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/ust.go @@ -11,7 +11,7 @@ package transformers import ( "fmt" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" ) @@ -37,7 +37,7 @@ func RetrieveUnifiedServiceTags(labels map[string]string) []string { if tagValue, found := labels[kubernetes.EnvTagLabelKey]; found { tags = append(tags, fmt.Sprintf("%s:%s", labelToTagKeys[kubernetes.EnvTagLabelKey], tagValue)) } else { - if envTag := config.Datadog().GetString("env"); envTag != "" { + if envTag := pkgconfigsetup.Datadog().GetString("env"); envTag != "" { tags = append(tags, fmt.Sprintf("%s:%s", tagKeyEnv, envTag)) } } diff --git a/pkg/collector/corechecks/containerimage/check.go b/pkg/collector/corechecks/containerimage/check.go index bf953b83a8d2c..791373763743e 100644 --- a/pkg/collector/corechecks/containerimage/check.go +++ b/pkg/collector/corechecks/containerimage/check.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" - ddConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -106,7 +106,7 @@ func Factory(store workloadmeta.Component) optional.Option[func() check.Check] { // Configure parses the check configuration and initializes the container_image check func (c *Check) Configure(senderManager sender.SenderManager, _ uint64, config, initConfig integration.Data, source string) error { - if !ddConfig.Datadog().GetBool("container_image.enabled") { + if !pkgconfigsetup.Datadog().GetBool("container_image.enabled") { return errors.New("collection of container images is disabled") } diff --git a/pkg/collector/corechecks/containerlifecycle/check.go b/pkg/collector/corechecks/containerlifecycle/check.go index 05347475beb57..b7a20adaa4c90 100644 --- a/pkg/collector/corechecks/containerlifecycle/check.go +++ b/pkg/collector/corechecks/containerlifecycle/check.go @@ -18,8 +18,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" - ddConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -53,7 +53,7 @@ type Check struct { // Configure parses the check configuration and initializes the container_lifecycle check func (c *Check) Configure(senderManager sender.SenderManager, _ uint64, config, initConfig integration.Data, source string) error { - if !ddConfig.Datadog().GetBool("container_lifecycle.enabled") { + if !pkgconfigsetup.Datadog().GetBool("container_lifecycle.enabled") { return errors.New("collection of container lifecycle events is disabled") } @@ -117,7 +117,7 @@ func (c *Check) Run() error { ) var taskEventsCh chan 
workloadmeta.EventBundle - if ddConfig.Datadog().GetBool("ecs_task_collection_enabled") { + if pkgconfigsetup.Datadog().GetBool("ecs_task_collection_enabled") { taskFilter := workloadmeta.NewFilterBuilder(). SetSource(workloadmeta.SourceNodeOrchestrator). @@ -186,7 +186,7 @@ func Factory(store workloadmeta.Component) optional.Option[func() check.Check] { // sendFargateTaskEvent sends Fargate task lifecycle event at the end of the check func (c *Check) sendFargateTaskEvent() { - if !ddConfig.Datadog().GetBool("ecs_task_collection_enabled") || + if !pkgconfigsetup.Datadog().GetBool("ecs_task_collection_enabled") || !env.IsECSFargate() { return } diff --git a/pkg/collector/corechecks/containers/containerd/events.go b/pkg/collector/corechecks/containers/containerd/events.go index 8e024071a19b8..71c14aa585ac5 100644 --- a/pkg/collector/corechecks/containers/containerd/events.go +++ b/pkg/collector/corechecks/containers/containerd/events.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/event" ctrUtil "github.com/DataDog/datadog-agent/pkg/util/containerd" "github.com/DataDog/datadog-agent/pkg/util/containers" @@ -174,7 +174,7 @@ func (s *subscriber) run(ctx context.Context) error { return fmt.Errorf("subscriber is already running the event listener routine") } - excludePauseContainers := config.Datadog().GetBool("exclude_pause_container") + excludePauseContainers := pkgconfigsetup.Datadog().GetBool("exclude_pause_container") // Only used when excludePauseContainers is true var pauseContainers setPauseContainers diff --git a/pkg/collector/corechecks/containers/containerd/events_test.go b/pkg/collector/corechecks/containers/containerd/events_test.go index 53aae7473ff70..a4cc07881af0a 100644 --- a/pkg/collector/corechecks/containers/containerd/events_test.go +++ b/pkg/collector/corechecks/containers/containerd/events_test.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" "github.com/DataDog/datadog-agent/pkg/collector/corechecks" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/event" containerdutil "github.com/DataDog/datadog-agent/pkg/util/containerd" "github.com/DataDog/datadog-agent/pkg/util/containerd/fake" @@ -246,8 +246,8 @@ func TestCheckEvents_PauseContainers(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - defaultExcludePauseContainers := config.Datadog().GetBool("exclude_pause_container") - config.Datadog().SetWithoutSource("exclude_pause_container", test.excludePauseContainers) + defaultExcludePauseContainers := pkgconfigsetup.Datadog().GetBool("exclude_pause_container") + pkgconfigsetup.Datadog().SetWithoutSource("exclude_pause_container", test.excludePauseContainers) if test.generateCreateEvent { eventCreateContainer, err := createContainerEvent(testNamespace, test.containerID) @@ -276,7 +276,7 @@ func TestCheckEvents_PauseContainers(t *testing.T) { assert.Empty(t, sub.Flush(time.Now().Unix())) } - config.Datadog().SetWithoutSource("exclude_pause_container", defaultExcludePauseContainers) + pkgconfigsetup.Datadog().SetWithoutSource("exclude_pause_container", defaultExcludePauseContainers) }) } 
diff --git a/pkg/collector/corechecks/containers/docker/check_network.go b/pkg/collector/corechecks/containers/docker/check_network.go index e8dbc5133f49f..3e89621573063 100644 --- a/pkg/collector/corechecks/containers/docker/check_network.go +++ b/pkg/collector/corechecks/containers/docker/check_network.go @@ -20,8 +20,8 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/containers/generic" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics" "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -38,7 +38,7 @@ func (d *DockerCheck) configureNetworkProcessor(processor *generic.Processor) { switch runtime.GOOS { case "linux": if env.IsHostProcAvailable() { - d.networkProcessorExtension = &dockerNetworkExtension{procPath: config.Datadog().GetString("container_proc_root")} + d.networkProcessorExtension = &dockerNetworkExtension{procPath: pkgconfigsetup.Datadog().GetString("container_proc_root")} } case "windows": d.networkProcessorExtension = &dockerNetworkExtension{} diff --git a/pkg/collector/corechecks/containers/generic/processor_network.go b/pkg/collector/corechecks/containers/generic/processor_network.go index 12d9538d4b5cf..d83fd0f215043 100644 --- a/pkg/collector/corechecks/containers/generic/processor_network.go +++ b/pkg/collector/corechecks/containers/generic/processor_network.go @@ -87,6 +87,7 @@ func (pn *ProcessorNetwork) processGroupedContainerNetwork() { for _, containerNetwork := range pn.ungroupedContainerNetwork { pn.generateNetworkMetrics(containerNetwork.tags, containerNetwork.stats) } + pn.ungroupedContainerNetwork = nil for _, containerNetworks := range pn.groupedContainerNetwork { // If we have multiple containers, tagging with container tag is incorrect as the metrics refer to whole isolation group. diff --git a/pkg/collector/corechecks/containers/kubelet/provider/probe/provider.go b/pkg/collector/corechecks/containers/kubelet/provider/probe/provider.go index 5afbb23e3891f..f6f1f2ae62255 100644 --- a/pkg/collector/corechecks/containers/kubelet/provider/probe/provider.go +++ b/pkg/collector/corechecks/containers/kubelet/provider/probe/provider.go @@ -67,7 +67,7 @@ func (p *Provider) proberProbeTotal(metricFam *prom.MetricFamily, sender sender. metricSuffix = "startup_probe" default: log.Debugf("Unsupported probe type %s", probeType) - return + continue } result := metric.Metric["result"] @@ -80,17 +80,17 @@ func (p *Provider) proberProbeTotal(metricFam *prom.MetricFamily, sender sender. 
metricSuffix += ".unknown.total" default: log.Debugf("Unsupported probe result %s", result) - return + continue } cID, _ := common.GetContainerID(p.store, metric.Metric, p.filter) if cID == "" { - return + continue } tags, _ := tagger.Tag(cID, types.HighCardinality) if len(tags) == 0 { - return + continue } tags = utils.ConcatenateTags(tags, p.Config.Tags) diff --git a/pkg/collector/corechecks/containers/kubelet/provider/probe/provider_test.go b/pkg/collector/corechecks/containers/kubelet/provider/probe/provider_test.go index ac9d134faf7db..4dfbf97124cce 100644 --- a/pkg/collector/corechecks/containers/kubelet/provider/probe/provider_test.go +++ b/pkg/collector/corechecks/containers/kubelet/provider/probe/provider_test.go @@ -99,11 +99,13 @@ func TestProvider_Provide(t *testing.T) { value: 281049, tags: []string{"instance_tag:something", "kube_namespace:kube-system", "pod_name:fluentbit-gke-45gvm", "kube_container_name:fluentbit"}, }, + /* Excluded container is not expected, see containers.Filter in the test { name: common.KubeletMetricsPrefix + "liveness_probe.success.total", value: 281049, tags: []string{"instance_tag:something", "kube_namespace:kube-system", "pod_name:fluentbit-gke-45gvm", "kube_container_name:fluentbit-gke"}, }, + */ { name: common.KubeletMetricsPrefix + "liveness_probe.success.total", value: 1686298, @@ -304,7 +306,7 @@ func TestProvider_Provide(t *testing.T) { p, err := NewProvider( &containers.Filter{ Enabled: true, - NameExcludeList: []*regexp.Regexp{regexp.MustCompile("agent-excluded")}, + NameExcludeList: []*regexp.Regexp{regexp.MustCompile("fluentbit-gke")}, }, config, store, diff --git a/pkg/collector/corechecks/ebpf/c/runtime/cgroup.h b/pkg/collector/corechecks/ebpf/c/runtime/cgroup.h index 58c63dfa59636..124081af8be78 100644 --- a/pkg/collector/corechecks/ebpf/c/runtime/cgroup.h +++ b/pkg/collector/corechecks/ebpf/c/runtime/cgroup.h @@ -9,28 +9,32 @@ #include "bpf_tracing.h" #include "bpf_builtins.h" -static __always_inline int get_cgroup_name(char *buf, size_t sz) { - if (!bpf_helper_exists(BPF_FUNC_get_current_task)) { - return 0; - } +static __always_inline int get_cgroup_name_for_task(struct task_struct *task, char *buf, size_t sz) { bpf_memset(buf, 0, sz); - struct task_struct *cur_tsk = (struct task_struct *)bpf_get_current_task(); + #ifdef COMPILE_CORE + enum cgroup_subsys_id___local { + memory_cgrp_id___local = 123, /* value doesn't matter */ + }; + int cgrp_id = bpf_core_enum_value(enum cgroup_subsys_id___local, memory_cgrp_id___local); + #else + int cgrp_id = memory_cgrp_id; + #endif -#ifdef COMPILE_CORE - enum cgroup_subsys_id___local { - memory_cgrp_id___local = 123, /* value doesn't matter */ - }; - int cgrp_id = bpf_core_enum_value(enum cgroup_subsys_id___local, memory_cgrp_id___local); -#else - int cgrp_id = memory_cgrp_id; -#endif - const char *name = BPF_CORE_READ(cur_tsk, cgroups, subsys[cgrp_id], cgroup, kn, name); + const char *name = BPF_CORE_READ(task, cgroups, subsys[cgrp_id], cgroup, kn, name); if (bpf_probe_read_kernel(buf, sz, name) < 0) { return 0; } - return 1; } +static __always_inline int get_cgroup_name(char *buf, size_t sz) { + if (!bpf_helper_exists(BPF_FUNC_get_current_task)) { + return 0; + } + + struct task_struct *cur_tsk = (struct task_struct *)bpf_get_current_task(); + return get_cgroup_name_for_task(cur_tsk, buf, sz); +} + #endif /* defined(BPF_CGROUP_H) */ diff --git a/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern-user.h b/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern-user.h index 
6153c6ef6a711..c95008774b44f 100644 --- a/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern-user.h +++ b/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern-user.h @@ -9,14 +9,14 @@ struct oom_stats { char cgroup_name[129]; - // Pid of triggering process - __u32 pid; // Pid of killed process - __u32 tpid; - // Name of triggering process - char fcomm[TASK_COMM_LEN]; + __u32 victim_pid; + // Pid of triggering process + __u32 trigger_pid; // Name of killed process - char tcomm[TASK_COMM_LEN]; + char victim_comm[TASK_COMM_LEN]; + // Name of triggering process + char trigger_comm[TASK_COMM_LEN]; // OOM score of killed process __s64 score; // OOM score adjustment of killed process diff --git a/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern.c b/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern.c index ee2f1a76523a7..35c7b4be7165d 100644 --- a/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern.c +++ b/pkg/collector/corechecks/ebpf/c/runtime/oom-kill-kern.c @@ -25,16 +25,17 @@ * the statistics per pid */ -BPF_HASH_MAP(oom_stats, u32, struct oom_stats, 10240) +BPF_HASH_MAP(oom_stats, u64, struct oom_stats, 10240) SEC("kprobe/oom_kill_process") int BPF_KPROBE(kprobe__oom_kill_process, struct oom_control *oc) { struct oom_stats zero = {}; struct oom_stats new = {}; + u64 ts = bpf_ktime_get_ns(); u32 pid = bpf_get_current_pid_tgid() >> 32; - bpf_map_update_elem(&oom_stats, &pid, &zero, BPF_NOEXIST); - struct oom_stats *s = bpf_map_lookup_elem(&oom_stats, &pid); + bpf_map_update_elem(&oom_stats, &ts, &zero, BPF_NOEXIST); + struct oom_stats *s = bpf_map_lookup_elem(&oom_stats, &ts); if (!s) { return 0; } @@ -43,15 +44,14 @@ int BPF_KPROBE(kprobe__oom_kill_process, struct oom_control *oc) { // expected a pointer to stack memory. Therefore, we work on stack // variable and update the map value at the end bpf_memcpy(&new, s, sizeof(struct oom_stats)); - - new.pid = pid; - get_cgroup_name(new.cgroup_name, sizeof(new.cgroup_name)); + new.trigger_pid = pid; struct task_struct *p = (struct task_struct *)BPF_CORE_READ(oc, chosen); if (!p) { return 0; } - BPF_CORE_READ_INTO(&new.tpid, p, pid); + get_cgroup_name_for_task(p, new.cgroup_name, sizeof(new.cgroup_name)); + BPF_CORE_READ_INTO(&new.victim_pid, p, pid); BPF_CORE_READ_INTO(&new.score, oc, chosen_points); #ifdef COMPILE_CORE if (bpf_core_field_exists(p->signal->oom_score_adj)) { @@ -63,11 +63,11 @@ int BPF_KPROBE(kprobe__oom_kill_process, struct oom_control *oc) { bpf_probe_read_kernel(&new.score_adj, sizeof(new.score_adj), &sig->oom_score_adj); #endif if (bpf_helper_exists(BPF_FUNC_get_current_comm)) { - bpf_get_current_comm(new.fcomm, sizeof(new.fcomm)); + bpf_get_current_comm(new.trigger_comm, sizeof(new.trigger_comm)); } - BPF_CORE_READ_INTO(&new.tcomm, p, comm); - new.tcomm[TASK_COMM_LEN - 1] = 0; + BPF_CORE_READ_INTO(&new.victim_comm, p, comm); + new.victim_comm[TASK_COMM_LEN - 1] = 0; struct mem_cgroup *memcg = NULL; #ifdef COMPILE_CORE diff --git a/pkg/collector/corechecks/ebpf/ebpf.go b/pkg/collector/corechecks/ebpf/ebpf.go index 688a781d50252..4e9e6fe65a3cf 100644 --- a/pkg/collector/corechecks/ebpf/ebpf.go +++ b/pkg/collector/corechecks/ebpf/ebpf.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" ebpfcheck "github.com/DataDog/datadog-agent/pkg/collector/corechecks/ebpf/probe/ebpfcheck/model" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" processnet 
"github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -68,7 +68,7 @@ func (m *EBPFCheck) Configure(senderManager sender.SenderManager, _ uint64, conf if err := m.config.Parse(config); err != nil { return fmt.Errorf("ebpf check config: %s", err) } - if err := processnet.CheckPath(ddconfig.SystemProbe().GetString("system_probe_config.sysprobe_socket")); err != nil { + if err := processnet.CheckPath(pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")); err != nil { return fmt.Errorf("sysprobe socket: %s", err) } @@ -80,7 +80,7 @@ func (m *EBPFCheck) Run() error { if m.sysProbeUtil == nil { var err error m.sysProbeUtil, err = processnet.GetRemoteSystemProbeUtil( - ddconfig.SystemProbe().GetString("system_probe_config.sysprobe_socket"), + pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket"), ) if err != nil { return fmt.Errorf("sysprobe connection: %s", err) diff --git a/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go b/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go index 75797cb5352a0..0a5b506ce1e7b 100644 --- a/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go +++ b/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go @@ -26,7 +26,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/ebpf/probe/oomkill/model" - dd_config "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/event" process_net "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/cgroups" @@ -87,7 +87,7 @@ func (m *OOMKillCheck) Run() error { } sysProbeUtil, err := process_net.GetRemoteSystemProbeUtil( - dd_config.SystemProbe().GetString("system_probe_config.sysprobe_socket")) + pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")) if err != nil { return err } @@ -132,8 +132,8 @@ func (m *OOMKillCheck) Run() error { triggerTypeText = "This OOM kill was invoked by the system." 
} tags = append(tags, "trigger_type:"+triggerType) - tags = append(tags, "trigger_process_name:"+line.FComm) - tags = append(tags, "process_name:"+line.TComm) + tags = append(tags, "trigger_process_name:"+line.TriggerComm) + tags = append(tags, "process_name:"+line.VictimComm) // submit counter metric sender.Count("oom_kill.oom_process.count", 1, "", tags) @@ -145,7 +145,7 @@ func (m *OOMKillCheck) Run() error { SourceTypeName: CheckName, EventType: CheckName, AggregationKey: containerID, - Title: fmt.Sprintf("Process OOM Killed: oom_kill_process called on %s (pid: %d)", line.TComm, line.TPid), + Title: fmt.Sprintf("Process OOM Killed: oom_kill_process called on %s (pid: %d)", line.VictimComm, line.VictimPid), Tags: tags, } @@ -155,10 +155,10 @@ func (m *OOMKillCheck) Run() error { if line.ScoreAdj != 0 { oomScoreAdj = fmt.Sprintf(", oom_score_adj: %d", line.ScoreAdj) } - if line.Pid == line.TPid { - fmt.Fprintf(&b, "Process `%s` (pid: %d, oom_score: %d%s) triggered an OOM kill on itself.", line.FComm, line.Pid, line.Score, oomScoreAdj) + if line.VictimPid == line.TriggerPid { + fmt.Fprintf(&b, "Process `%s` (pid: %d, oom_score: %d%s) triggered an OOM kill on itself.", line.VictimComm, line.VictimPid, line.Score, oomScoreAdj) } else { - fmt.Fprintf(&b, "Process `%s` (pid: %d) triggered an OOM kill on process `%s` (pid: %d, oom_score: %d%s).", line.FComm, line.Pid, line.TComm, line.TPid, line.Score, oomScoreAdj) + fmt.Fprintf(&b, "Process `%s` (pid: %d) triggered an OOM kill on process `%s` (pid: %d, oom_score: %d%s).", line.TriggerComm, line.TriggerPid, line.VictimComm, line.VictimPid, line.Score, oomScoreAdj) } fmt.Fprintf(&b, "\n The process had reached %d pages in size. \n\n", line.Pages) b.WriteString(triggerTypeText) diff --git a/pkg/collector/corechecks/ebpf/probe/ebpfcheck/probe.go b/pkg/collector/corechecks/ebpf/probe/ebpfcheck/probe.go index 526b1b0220d4b..d3cd6aab4cf12 100644 --- a/pkg/collector/corechecks/ebpf/probe/ebpfcheck/probe.go +++ b/pkg/collector/corechecks/ebpf/probe/ebpfcheck/probe.go @@ -28,7 +28,7 @@ import ( "golang.org/x/sys/unix" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/ebpf/probe/ebpfcheck/model" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" ddmaps "github.com/DataDog/datadog-agent/pkg/ebpf/maps" @@ -86,17 +86,17 @@ func NewProbe(cfg *ddebpf.Config) (*Probe, error) { return nil, err } - if ddconfig.SystemProbe().GetBool("ebpf_check.kernel_bpf_stats") { + if pkgconfigsetup.SystemProbe().GetBool("ebpf_check.kernel_bpf_stats") { probe.statsFD, err = ebpf.EnableStats(unix.BPF_STATS_RUN_TIME) if err != nil { log.Warnf("kernel ebpf stats failed to enable, program runtime and run count will be unavailable: %s", err) } } - probe.mapBuffers.keysBufferSizeLimit = uint32(ddconfig.SystemProbe().GetInt("ebpf_check.entry_count.max_keys_buffer_size_bytes")) - probe.mapBuffers.valuesBufferSizeLimit = uint32(ddconfig.SystemProbe().GetInt("ebpf_check.entry_count.max_values_buffer_size_bytes")) - probe.mapBuffers.iterationRestartDetectionEntries = ddconfig.SystemProbe().GetInt("ebpf_check.entry_count.entries_for_iteration_restart_detection") - probe.entryCountMaxRestarts = ddconfig.SystemProbe().GetInt("ebpf_check.entry_count.max_restarts") + probe.mapBuffers.keysBufferSizeLimit = uint32(pkgconfigsetup.SystemProbe().GetInt("ebpf_check.entry_count.max_keys_buffer_size_bytes")) + 
probe.mapBuffers.valuesBufferSizeLimit = uint32(pkgconfigsetup.SystemProbe().GetInt("ebpf_check.entry_count.max_values_buffer_size_bytes")) + probe.mapBuffers.iterationRestartDetectionEntries = pkgconfigsetup.SystemProbe().GetInt("ebpf_check.entry_count.entries_for_iteration_restart_detection") + probe.entryCountMaxRestarts = pkgconfigsetup.SystemProbe().GetInt("ebpf_check.entry_count.max_restarts") if isForEachElemHelperAvailable() { probe.mphCache = newMapProgHelperCache() diff --git a/pkg/collector/corechecks/ebpf/probe/oomkill/c_types_linux.go b/pkg/collector/corechecks/ebpf/probe/oomkill/c_types_linux.go index 062036afd3cb7..9637c6f16adcf 100644 --- a/pkg/collector/corechecks/ebpf/probe/oomkill/c_types_linux.go +++ b/pkg/collector/corechecks/ebpf/probe/oomkill/c_types_linux.go @@ -4,14 +4,14 @@ package oomkill type oomStats struct { - Cgroup_name [129]byte - Pid uint32 - Tpid uint32 - Fcomm [16]byte - Tcomm [16]byte - Score int64 - Score_adj int16 - Pages uint64 - Memcg_oom uint32 - Pad_cgo_0 [4]byte + Cgroup_name [129]byte + Victim_pid uint32 + Trigger_pid uint32 + Victim_comm [16]byte + Trigger_comm [16]byte + Score int64 + Score_adj int16 + Pages uint64 + Memcg_oom uint32 + Pad_cgo_0 [4]byte } diff --git a/pkg/collector/corechecks/ebpf/probe/oomkill/model/oom_kill_types.go b/pkg/collector/corechecks/ebpf/probe/oomkill/model/oom_kill_types.go index 42272d27dbf7f..f065837bb3094 100644 --- a/pkg/collector/corechecks/ebpf/probe/oomkill/model/oom_kill_types.go +++ b/pkg/collector/corechecks/ebpf/probe/oomkill/model/oom_kill_types.go @@ -8,13 +8,13 @@ package model // OOMKillStats contains the statistics of a given socket type OOMKillStats struct { - CgroupName string `json:"cgroupName"` - Pid uint32 `json:"pid"` - TPid uint32 `json:"tpid"` - FComm string `json:"fcomm"` - TComm string `json:"tcomm"` - Score int64 `json:"score"` - ScoreAdj int16 `json:"scoreAdj"` - Pages uint64 `json:"pages"` - MemCgOOM uint32 `json:"memcgoom"` + CgroupName string `json:"cgroupName"` + VictimPid uint32 `json:"victimPid"` + TriggerPid uint32 `json:"triggerPid"` + VictimComm string `json:"victimComm"` + TriggerComm string `json:"triggerComm"` + Score int64 `json:"score"` + ScoreAdj int16 `json:"scoreAdj"` + Pages uint64 `json:"pages"` + MemCgOOM uint32 `json:"memcgoom"` } diff --git a/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill.go b/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill.go index fc9e27a78a31a..e277b3f48381b 100644 --- a/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill.go +++ b/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill.go @@ -34,7 +34,7 @@ const oomMapName = "oom_stats" // Probe is the eBPF side of the OOM Kill check type Probe struct { m *manager.Manager - oomMap *maps.GenericMap[uint32, oomStats] + oomMap *maps.GenericMap[uint64, oomStats] } // NewProbe creates a [Probe] @@ -117,7 +117,7 @@ func startOOMKillProbe(buf bytecode.AssetReader, managerOptions manager.Options) return nil, fmt.Errorf("failed to start manager: %w", err) } - oomMap, err := maps.GetMap[uint32, oomStats](m, oomMapName) + oomMap, err := maps.GetMap[uint64, oomStats](m, oomMapName) if err != nil { return nil, fmt.Errorf("failed to get map '%s': %w", oomMapName, err) } @@ -139,19 +139,21 @@ func (k *Probe) Close() { // GetAndFlush gets the stats func (k *Probe) GetAndFlush() (results []model.OOMKillStats) { - var pid uint32 + var allTimestamps []uint64 + var ts uint64 var stat oomStats it := k.oomMap.Iterate() - for it.Next(&pid, &stat) { + for it.Next(&ts, &stat) { results = append(results, 
convertStats(stat)) + allTimestamps = append(allTimestamps, ts) } if err := it.Err(); err != nil { log.Warnf("failed to iterate on OOM stats while flushing: %s", err) } - for _, r := range results { - if err := k.oomMap.Delete(&r.Pid); err != nil { + for _, ts := range allTimestamps { + if err := k.oomMap.Delete(&ts); err != nil { log.Warnf("failed to delete stat: %s", err) } } @@ -161,12 +163,12 @@ func (k *Probe) GetAndFlush() (results []model.OOMKillStats) { func convertStats(in oomStats) (out model.OOMKillStats) { out.CgroupName = unix.ByteSliceToString(in.Cgroup_name[:]) - out.Pid = in.Pid - out.TPid = in.Tpid + out.VictimPid = in.Victim_pid + out.TriggerPid = in.Trigger_pid out.Score = in.Score out.ScoreAdj = in.Score_adj - out.FComm = unix.ByteSliceToString(in.Fcomm[:]) - out.TComm = unix.ByteSliceToString(in.Tcomm[:]) + out.VictimComm = unix.ByteSliceToString(in.Victim_comm[:]) + out.TriggerComm = unix.ByteSliceToString(in.Trigger_comm[:]) out.Pages = in.Pages out.MemCgOOM = in.Memcg_oom return diff --git a/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill_test.go b/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill_test.go index 96f7b67bd043b..02e76e112b679 100644 --- a/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill_test.go +++ b/pkg/collector/corechecks/ebpf/probe/oomkill/oom_kill_test.go @@ -92,7 +92,7 @@ func TestOOMKillProbe(t *testing.T) { var result model.OOMKillStats require.Eventually(t, func() bool { for _, r := range oomKillProbe.GetAndFlush() { - if r.TPid == uint32(cmd.Process.Pid) { + if r.TriggerPid == uint32(cmd.Process.Pid) { result = r return true } @@ -101,11 +101,11 @@ func TestOOMKillProbe(t *testing.T) { }, 10*time.Second, 500*time.Millisecond, "failed to find an OOM killed process with pid %d", cmd.Process.Pid) assert.Regexp(t, regexp.MustCompile("run-([0-9|a-z]*).scope"), result.CgroupName, "cgroup name") - assert.Equal(t, result.TPid, result.Pid, "tpid == pid") + assert.Equal(t, result.TriggerPid, result.VictimPid, "tpid == pid") assert.NotZero(t, result.Score, "score") assert.Equal(t, int16(42), result.ScoreAdj, "score adj") - assert.Equal(t, "dd", result.FComm, "fcomm") - assert.Equal(t, "dd", result.TComm, "tcomm") + assert.Equal(t, "dd", result.VictimComm, "victim comm") + assert.Equal(t, "dd", result.TriggerComm, "trigger comm") assert.NotZero(t, result.Pages, "pages") assert.Equal(t, uint32(1), result.MemCgOOM, "memcg oom") }) diff --git a/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go b/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go index 30fa918eb37f3..126f949876c87 100644 --- a/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go +++ b/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/ebpf/probe/tcpqueuelength/model" - dd_config "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" process_net "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/cgroups" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -83,7 +83,7 @@ func (t *TCPQueueLengthCheck) Run() error { } sysProbeUtil, err := process_net.GetRemoteSystemProbeUtil( - dd_config.SystemProbe().GetString("system_probe_config.sysprobe_socket")) + 
pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")) if err != nil { return err } diff --git a/pkg/collector/corechecks/embed/apm/apm.go b/pkg/collector/corechecks/embed/apm/apm.go index af0d2d4afa7b9..28162165eee6d 100644 --- a/pkg/collector/corechecks/embed/apm/apm.go +++ b/pkg/collector/corechecks/embed/apm/apm.go @@ -25,7 +25,7 @@ import ( checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" "github.com/DataDog/datadog-agent/pkg/collector/check/stats" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/embed/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -96,10 +96,10 @@ func (c *APMCheck) run() error { hname, _ := hostname.Get(context.TODO()) env := os.Environ() - env = append(env, fmt.Sprintf("DD_API_KEY=%s", utils.SanitizeAPIKey(config.Datadog().GetString("api_key")))) + env = append(env, fmt.Sprintf("DD_API_KEY=%s", utils.SanitizeAPIKey(pkgconfigsetup.Datadog().GetString("api_key")))) env = append(env, fmt.Sprintf("DD_HOSTNAME=%s", hname)) - env = append(env, fmt.Sprintf("DD_DOGSTATSD_PORT=%s", config.Datadog().GetString("dogstatsd_port"))) - env = append(env, fmt.Sprintf("DD_LOG_LEVEL=%s", config.Datadog().GetString("log_level"))) + env = append(env, fmt.Sprintf("DD_DOGSTATSD_PORT=%s", pkgconfigsetup.Datadog().GetString("dogstatsd_port"))) + env = append(env, fmt.Sprintf("DD_LOG_LEVEL=%s", pkgconfigsetup.Datadog().GetString("log_level"))) cmd.Env = env // forward the standard output to the Agent logger @@ -176,7 +176,7 @@ func (c *APMCheck) Configure(_ sender.SenderManager, _ uint64, data integration. c.binPath = defaultBinPath } - configFile := config.Datadog().ConfigFileUsed() + configFile := pkgconfigsetup.Datadog().ConfigFileUsed() c.commandOpts = []string{} @@ -186,7 +186,7 @@ func (c *APMCheck) Configure(_ sender.SenderManager, _ uint64, data integration. } c.source = source - c.telemetry = utils.IsCheckTelemetryEnabled("apm", config.Datadog()) + c.telemetry = utils.IsCheckTelemetryEnabled("apm", pkgconfigsetup.Datadog()) c.initConfig = string(initConfig) c.instanceConfig = string(data) return nil diff --git a/pkg/collector/corechecks/embed/process/process_agent.go b/pkg/collector/corechecks/embed/process/process_agent.go index 1aef66f233b8a..c27d5a4b8f975 100644 --- a/pkg/collector/corechecks/embed/process/process_agent.go +++ b/pkg/collector/corechecks/embed/process/process_agent.go @@ -25,7 +25,7 @@ import ( checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" "github.com/DataDog/datadog-agent/pkg/collector/check/stats" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/embed/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/util/executable" @@ -158,7 +158,7 @@ func (c *ProcessAgentCheck) run() error { func (c *ProcessAgentCheck) Configure(senderManager sender.SenderManager, _ uint64, data integration.Data, initConfig integration.Data, source string) error { // only log whether process check is enabled or not but don't return early, because we still need to initialize "binPath", "source" and // start up process-agent. 
Ultimately it's up to process-agent to decide whether to run or not based on the config - if enabled := config.Datadog().GetBool("process_config.process_collection.enabled"); !enabled { + if enabled := pkgconfigsetup.Datadog().GetBool("process_config.process_collection.enabled"); !enabled { log.Info("live process monitoring is disabled through main configuration file") } @@ -185,14 +185,14 @@ func (c *ProcessAgentCheck) Configure(senderManager sender.SenderManager, _ uint } // be explicit about the config file location - configFile := config.Datadog().ConfigFileUsed() + configFile := pkgconfigsetup.Datadog().ConfigFileUsed() c.commandOpts = []string{} if _, err := os.Stat(configFile); !os.IsNotExist(err) { c.commandOpts = append(c.commandOpts, fmt.Sprintf("-config=%s", configFile)) } c.source = source - c.telemetry = utils.IsCheckTelemetryEnabled("process_agent", config.Datadog()) + c.telemetry = utils.IsCheckTelemetryEnabled("process_agent", pkgconfigsetup.Datadog()) c.initConfig = string(initConfig) c.instanceConfig = string(data) return nil diff --git a/pkg/collector/corechecks/net/ntp/ntp_test.go b/pkg/collector/corechecks/net/ntp/ntp_test.go index 5e2959c7e3c24..d6035f396642d 100644 --- a/pkg/collector/corechecks/net/ntp/ntp_test.go +++ b/pkg/collector/corechecks/net/ntp/ntp_test.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders" ) @@ -361,7 +361,7 @@ func TestDefaultHostConfig(t *testing.T) { expectedHosts := []string{"0.datadog.pool.ntp.org", "1.datadog.pool.ntp.org", "2.datadog.pool.ntp.org", "3.datadog.pool.ntp.org"} testedConfig := []byte(``) - config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{}) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{}) ntpCheck := new(NTPCheck) ntpCheck.Configure(aggregator.NewNoOpSenderManager(), integration.FakeConfigHash, testedConfig, []byte(""), "test") diff --git a/pkg/collector/corechecks/networkpath/config.go b/pkg/collector/corechecks/networkpath/config.go index c7b57f6924a6a..f59acbb12301c 100644 --- a/pkg/collector/corechecks/networkpath/config.go +++ b/pkg/collector/corechecks/networkpath/config.go @@ -10,21 +10,28 @@ import ( "strings" "time" + "gopkg.in/yaml.v2" + "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkpath/payload" - "gopkg.in/yaml.v2" ) const ( defaultCheckInterval time.Duration = 1 * time.Minute ) +// Number is a type that is used to make a generic version +// of the firstNonZero function +type Number interface { + ~int | ~int64 | ~uint8 +} + // InitConfig is used to deserialize integration init config type InitConfig struct { MinCollectionInterval int64 `yaml:"min_collection_interval"` TimeoutMs int64 `yaml:"timeout"` + MaxTTL uint8 `yaml:"max_ttl"` } // InstanceConfig is used to deserialize integration instance config @@ -83,7 +90,6 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data c.DestPort = instance.DestPort c.SourceService = instance.SourceService c.DestinationService = 
instance.DestinationService - c.MaxTTL = instance.MaxTTL c.Protocol = payload.Protocol(strings.ToUpper(instance.Protocol)) c.MinCollectionInterval = firstNonZero( @@ -104,13 +110,19 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data return nil, fmt.Errorf("timeout must be > 0") } + c.MaxTTL = firstNonZero( + instance.MaxTTL, + initConfig.MaxTTL, + setup.DefaultNetworkPathMaxTTL, + ) + c.Tags = instance.Tags - c.Namespace = coreconfig.Datadog().GetString("network_devices.namespace") + c.Namespace = setup.Datadog().GetString("network_devices.namespace") return c, nil } -func firstNonZero(values ...time.Duration) time.Duration { +func firstNonZero[T Number](values ...T) T { for _, value := range values { if value != 0 { return value diff --git a/pkg/collector/corechecks/networkpath/config_test.go b/pkg/collector/corechecks/networkpath/config_test.go index e34be2bb11cb9..c4d7a591deb5f 100644 --- a/pkg/collector/corechecks/networkpath/config_test.go +++ b/pkg/collector/corechecks/networkpath/config_test.go @@ -9,15 +9,15 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkpath/payload" - "github.com/stretchr/testify/assert" ) func TestNewCheckConfig(t *testing.T) { - coreconfig.Datadog().SetDefault("network_devices.namespace", "my-namespace") + setup.Datadog().SetDefault("network_devices.namespace", "my-namespace") tests := []struct { name string rawInstance integration.Data @@ -36,6 +36,7 @@ hostname: 1.2.3.4 MinCollectionInterval: time.Duration(60) * time.Second, Namespace: "my-namespace", Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -71,6 +72,7 @@ min_collection_interval: 10 MinCollectionInterval: time.Duration(42) * time.Second, Namespace: "my-namespace", Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -86,6 +88,7 @@ min_collection_interval: 10 MinCollectionInterval: time.Duration(10) * time.Second, Namespace: "my-namespace", Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -98,6 +101,7 @@ hostname: 1.2.3.4 MinCollectionInterval: time.Duration(1) * time.Minute, Namespace: "my-namespace", Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -115,6 +119,7 @@ destination_service: service-b MinCollectionInterval: time.Duration(60) * time.Second, Namespace: "my-namespace", Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -130,6 +135,7 @@ protocol: udp Namespace: "my-namespace", Protocol: payload.ProtocolUDP, Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -145,6 +151,7 @@ protocol: UDP Namespace: "my-namespace", Protocol: payload.ProtocolUDP, Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -160,6 +167,7 @@ protocol: TCP Namespace: "my-namespace", Protocol: payload.ProtocolTCP, Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -177,6 +185,7 @@ min_collection_interval: 10 MinCollectionInterval: time.Duration(42) 
* time.Second, Namespace: "my-namespace", Timeout: 50000 * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -195,6 +204,7 @@ timeout: 70000 MinCollectionInterval: time.Duration(42) * time.Second, Namespace: "my-namespace", Timeout: 50000 * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -212,6 +222,7 @@ timeout: 70000 MinCollectionInterval: time.Duration(42) * time.Second, Namespace: "my-namespace", Timeout: 70000 * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -228,6 +239,7 @@ min_collection_interval: 10 MinCollectionInterval: time.Duration(42) * time.Second, Namespace: "my-namespace", Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: setup.DefaultNetworkPathMaxTTL, }, }, { @@ -242,6 +254,61 @@ timeout: -1 `), expectedError: "timeout must be > 0", }, + { + name: "maxTTL from instance config", + rawInstance: []byte(` +hostname: 1.2.3.4 +max_ttl: 50 +min_collection_interval: 42 +`), + rawInitConfig: []byte(` +min_collection_interval: 10 +`), + expectedConfig: &CheckConfig{ + DestHostname: "1.2.3.4", + MinCollectionInterval: time.Duration(42) * time.Second, + Namespace: "my-namespace", + Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: 50, + }, + }, + { + name: "maxTTL from instance config preferred over init config", + rawInstance: []byte(` +hostname: 1.2.3.4 +max_ttl: 50 +min_collection_interval: 42 +`), + rawInitConfig: []byte(` +min_collection_interval: 10 +max_ttl: 64 +`), + expectedConfig: &CheckConfig{ + DestHostname: "1.2.3.4", + MinCollectionInterval: time.Duration(42) * time.Second, + Namespace: "my-namespace", + Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: 50, + }, + }, + { + name: "maxTTL from init config", + rawInstance: []byte(` +hostname: 1.2.3.4 +min_collection_interval: 42 +`), + rawInitConfig: []byte(` +min_collection_interval: 10 +max_ttl: 64 +`), + expectedConfig: &CheckConfig{ + DestHostname: "1.2.3.4", + MinCollectionInterval: time.Duration(42) * time.Second, + Namespace: "my-namespace", + Timeout: setup.DefaultNetworkPathTimeout * time.Millisecond, + MaxTTL: 64, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/collector/corechecks/networkpath/networkpath.go b/pkg/collector/corechecks/networkpath/networkpath.go index e7ba9465f268b..8c810bc423509 100644 --- a/pkg/collector/corechecks/networkpath/networkpath.go +++ b/pkg/collector/corechecks/networkpath/networkpath.go @@ -67,12 +67,12 @@ func (c *Check) Run() error { return fmt.Errorf("failed to trace path: %w", err) } path.Namespace = c.config.Namespace + path.Origin = payload.PathOriginNetworkPathIntegration // Add tags to path - commonTags := append(utils.GetCommonAgentTags(), c.config.Tags...) path.Source.Service = c.config.SourceService path.Destination.Service = c.config.DestinationService - path.Tags = commonTags + path.Tags = c.config.Tags // send to EP err = c.SendNetPathMDToEP(senderInstance, path) @@ -80,7 +80,8 @@ func (c *Check) Run() error { return fmt.Errorf("failed to send network path metadata: %w", err) } - c.submitTelemetry(metricSender, path, commonTags, startTime) + metricTags := append(utils.GetCommonAgentTags(), c.config.Tags...) 
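The max_ttl test cases above exercise the new generic firstNonZero helper from config.go. A self-contained sketch of the precedence it implements (instance value, then init_config value, then the package default); the default of 30 below is an arbitrary stand-in for setup.DefaultNetworkPathMaxTTL, whose real value is not shown in this diff:

// Sketch, not part of the diff: the instance > init_config > default precedence
// implemented by the generic firstNonZero helper added in config.go above.
package main

import "fmt"

type number interface {
	~int | ~int64 | ~uint8
}

func firstNonZero[T number](values ...T) T {
	for _, v := range values {
		if v != 0 {
			return v
		}
	}
	var zero T
	return zero
}

func main() {
	const defaultMaxTTL uint8 = 30 // stand-in for setup.DefaultNetworkPathMaxTTL
	instanceMaxTTL := uint8(0)     // not set in the instance config
	initMaxTTL := uint8(64)        // max_ttl set in init_config
	fmt.Println(firstNonZero(instanceMaxTTL, initMaxTTL, defaultMaxTTL)) // prints 64
}

Instance-level settings win simply because NewCheckConfig lists them first in the firstNonZero call.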
+ c.submitTelemetry(metricSender, path, metricTags, startTime) senderInstance.Commit() return nil diff --git a/pkg/collector/corechecks/oracle/activity.go b/pkg/collector/corechecks/oracle/activity.go index 9b3ce2fa3c134..3979e1e38eff6 100644 --- a/pkg/collector/corechecks/oracle/activity.go +++ b/pkg/collector/corechecks/oracle/activity.go @@ -469,6 +469,9 @@ AND status = 'ACTIVE'`) return err } sendMetricWithDefaultTags(c, gauge, "dd.oracle.activity.time_ms", float64(time.Since(start).Milliseconds())) + TlmOracleActivityLatency.Observe(float64(time.Since(start).Milliseconds())) + TlmOracleActivitySamplesCount.Add(float64(len(sessionRows))) + sender.Commit() return nil diff --git a/pkg/collector/corechecks/oracle/statements.go b/pkg/collector/corechecks/oracle/statements.go index bacc29624bea7..910cc06acdd73 100644 --- a/pkg/collector/corechecks/oracle/statements.go +++ b/pkg/collector/corechecks/oracle/statements.go @@ -868,8 +868,10 @@ func (c *Check) StatementMetrics() (int, error) { sender.EventPlatformEvent(payloadBytes, "dbm-metrics") sendMetricWithDefaultTags(c, gauge, "dd.oracle.statements_metrics.time_ms", float64(time.Since(start).Milliseconds())) + TlmOracleStatementMetricsLatency.Observe(float64(time.Since(start).Milliseconds())) if c.config.ExecutionPlans.Enabled { sendMetricWithDefaultTags(c, gauge, "dd.oracle.plan_errors.count", float64(planErrors)) + TlmOracleStatementMetricsErrorCount.Add(float64(planErrors)) } sender.Commit() diff --git a/pkg/collector/corechecks/oracle/telemetry.go b/pkg/collector/corechecks/oracle/telemetry.go new file mode 100644 index 0000000000000..34be6ef378d6b --- /dev/null +++ b/pkg/collector/corechecks/oracle/telemetry.go @@ -0,0 +1,25 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build oracle + +package oracle + +import ( + "github.com/DataDog/datadog-agent/pkg/telemetry" +) + +// These collectors gather telemetry data for cross-org analysis +// They are not expected to appear in the originating org's metrics +var ( + // TlmOracleActivityLatency is the time for the activity gathering to complete + TlmOracleActivityLatency = telemetry.NewHistogram("oracle", "activity_latency", nil, "Histogram of activity query latency in ms", []float64{10, 25, 50, 75, 100, 250, 500, 1000, 10000}) + // TlmOracleActivitySamplesCount is the number of activity samples collected + TlmOracleActivitySamplesCount = telemetry.NewCounter("oracle", "activity_samples_count", nil, "Number of activity samples collected") + // TlmOracleStatementMetricsLatency is the time for the statement metrics gathering to complete + TlmOracleStatementMetricsLatency = telemetry.NewHistogram("oracle", "statement_metrics", nil, "Histogram of statement metrics latency in ms", []float64{10, 25, 50, 75, 100, 250, 500, 1000, 10000}) + // TlmOracleStatementMetricsErrorCount is the number of statement plan errors + TlmOracleStatementMetricsErrorCount = telemetry.NewCounter("oracle", "statement_plan_errors", nil, "Number of statement plan errors") +) diff --git a/pkg/collector/corechecks/orchestrator/pod/pod_test.go b/pkg/collector/corechecks/orchestrator/pod/pod_test.go index f803c7534f784..c3ef7a1abee43 100644 --- a/pkg/collector/corechecks/orchestrator/pod/pod_test.go +++ b/pkg/collector/corechecks/orchestrator/pod/pod_test.go @@ -22,12 +22,13 @@ import ( "github.com/stretchr/testify/suite" "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + "github.com/DataDog/datadog-agent/pkg/config/setup/constants" oconfig "github.com/DataDog/datadog-agent/pkg/orchestrator/config" "github.com/DataDog/datadog-agent/pkg/serializer/types" "github.com/DataDog/datadog-agent/pkg/util/cache" @@ -150,7 +151,7 @@ func TestPodTestSuite(t *testing.T) { } func (suite *PodTestSuite) TestPodCheck() { - cacheKey := cache.BuildAgentKey(config.ClusterIDCacheKey) + cacheKey := cache.BuildAgentKey(constants.ClusterIDCacheKey) cachedClusterID, found := cache.Cache.Get(cacheKey) if !found { cache.Cache.Set(cacheKey, strings.Repeat("1", 36), cache.NoExpiration) } diff --git a/pkg/collector/corechecks/sbom/processor.go b/pkg/collector/corechecks/sbom/processor.go index 637bf4ecdfbe8..91bfeddccf531 100644 --- a/pkg/collector/corechecks/sbom/processor.go +++ b/pkg/collector/corechecks/sbom/processor.go @@ -10,9 +10,6 @@ package sbom import ( "context" "errors" - "io/fs" - "os" - "path/filepath" "strings" "time" @@ -21,9 +18,8 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" - "github.com/DataDog/datadog-agent/pkg/config/env" - ddConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/sbom" "github.com/DataDog/datadog-agent/pkg/sbom/collectors/host" sbomscanner
"github.com/DataDog/datadog-agent/pkg/sbom/scanner" @@ -38,7 +34,7 @@ import ( ) var /* const */ ( - envVarEnv = ddConfig.Datadog().GetString("env") + envVarEnv = pkgconfigsetup.Datadog().GetString("env") sourceAgent = "agent" ) @@ -222,39 +218,13 @@ func (p *processor) processHostScanResult(result sbom.ScanResult) { p.queue <- sbom } -type relFS struct { - root string - fs fs.FS -} - -func newFS(root string) fs.FS { - fs := os.DirFS(root) - return &relFS{root: "/", fs: fs} -} - -func (f *relFS) Open(name string) (fs.File, error) { - if filepath.IsAbs(name) { - var err error - name, err = filepath.Rel(f.root, name) - if err != nil { - return nil, err - } - } - - return f.fs.Open(name) -} - func (p *processor) triggerHostScan() { if !p.hostSBOM { return } log.Debugf("Triggering host SBOM refresh") - scanPath := "/" - if hostRoot := os.Getenv("HOST_ROOT"); env.IsContainerized() && hostRoot != "" { - scanPath = hostRoot - } - scanRequest := host.NewScanRequest(scanPath, newFS("/")) + scanRequest := host.NewHostScanRequest() if err := p.sbomScanner.Scan(scanRequest); err != nil { log.Errorf("Failed to trigger SBOM generation for host: %s", err) diff --git a/pkg/collector/corechecks/servicediscovery/apm/detect.go b/pkg/collector/corechecks/servicediscovery/apm/detect.go index 8a5419676d0f3..a1ad2d303fe62 100644 --- a/pkg/collector/corechecks/servicediscovery/apm/detect.go +++ b/pkg/collector/corechecks/servicediscovery/apm/detect.go @@ -19,7 +19,6 @@ import ( "strings" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language" - "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language/reader" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/usm" "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" "github.com/DataDog/datadog-agent/pkg/util/kernel" @@ -48,13 +47,6 @@ var ( language.Python: pythonDetector, language.Go: goDetector, } - // For now, only allow a subset of the above detectors to actually run. 
- allowedLangs = map[language.Language]struct{}{ - language.Java: {}, - language.Node: {}, - language.Python: {}, - language.Go: {}, - } nodeAPMCheckRegex = regexp.MustCompile(`"dd-trace"`) ) @@ -66,10 +58,6 @@ func Detect(pid int, args []string, envs map[string]string, lang language.Langua return Injected } - if _, ok := allowedLangs[lang]; !ok { - return None - } - // different detection for provided instrumentation for each if detect, ok := detectorMap[lang]; ok { return detect(pid, args, envs, contextMap) @@ -118,12 +106,16 @@ func goDetector(pid int, _ []string, _ map[string]string, _ usm.DetectorContextM } defer elfFile.Close() - _, err = bininspect.GetAnySymbolWithPrefix(elfFile, ddTraceGoPrefix, ddTraceGoMaxLength) - if err != nil { - return None + if _, err = bininspect.GetAnySymbolWithPrefix(elfFile, ddTraceGoPrefix, ddTraceGoMaxLength); err == nil { + return Provided } - return Provided + // We failed to find symbols in the regular symbols section, now we can try the pclntab + if _, err = bininspect.GetAnySymbolWithPrefixPCLNTAB(elfFile, ddTraceGoPrefix, ddTraceGoMaxLength); err == nil { + return Provided + } + return None + } func pythonDetectorFromMapsReader(reader io.Reader) Instrumentation { @@ -246,93 +238,44 @@ func javaDetector(_ int, args []string, envs map[string]string, _ usm.DetectorCo return None } -func findFile(fileName string) (io.ReadCloser, bool) { - f, err := os.Open(fileName) - if err != nil { - return nil, false - } - return f, true -} - -const datadogDotNetInstrumented = "Datadog.Trace.ClrProfiler.Native" +func dotNetDetectorFromMapsReader(reader io.Reader) Instrumentation { + scanner := bufio.NewScanner(bufio.NewReader(reader)) + for scanner.Scan() { + line := scanner.Text() -func dotNetDetector(_ int, args []string, envs map[string]string, _ usm.DetectorContextMap) Instrumentation { - // if it's just the word `dotnet` by itself, don't instrument - if len(args) == 1 && args[0] == "dotnet" { - return None + if strings.HasSuffix(line, "Datadog.Trace.dll") { + return Provided + } } - /* - From Kevin Gosse: - - CORECLR_ENABLE_PROFILING=1 - - CORECLR_PROFILER_PATH environment variables set - (it means that a profiler is attached, it doesn't really matter if it's ours or another vendor) - */ - // don't instrument if the tracer is already installed - foundFlags := 0 - if _, ok := envs["CORECLR_PROFILER_PATH"]; ok { - foundFlags |= 1 - } + return None +} + +// dotNetDetector detects instrumentation in .NET applications. +// +// The primary check is for the environment variables which enables .NET +// profiling. This is required for auto-instrumentation, and besides that custom +// instrumentation using version 3.0.0 or later of Datadog.Trace requires +// auto-instrumentation. It is also set if some third-party +// profiling/instrumentation is active. +// +// The secondary check is to detect cases where an older version of +// Datadog.Trace is used for manual instrumentation without enabling +// auto-instrumentation. For this, we check for the presence of the DLL in the +// maps file. Note that this does not work for single-file deployments. 
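To make the two-step detection described in this comment concrete (the maps entry it refers to is shown just below), here is a self-contained sketch of the same idea using only the standard library. The environment variable and the Datadog.Trace.dll suffix come from the hunk; the function name and pid handling are illustrative, not the detector's actual code:

// Sketch, not part of the diff: the .NET detection flow in isolation, done by
// checking the profiler env var first and the process maps file second.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func dotNetInstrumented(pid int, envs map[string]string) bool {
	// Primary signal: profiling is enabled, which auto-instrumentation
	// (and Datadog.Trace 3.0.0 or later) requires.
	if envs["CORECLR_ENABLE_PROFILING"] == "1" {
		return true
	}
	// Secondary signal: an older Datadog.Trace.dll mapped into the process.
	// This does not cover single-file deployments, as the comment above notes.
	f, err := os.Open(fmt.Sprintf("/proc/%d/maps", pid))
	if err != nil {
		return false
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		if strings.HasSuffix(scanner.Text(), "Datadog.Trace.dll") {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(dotNetInstrumented(os.Getpid(), map[string]string{}))
}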
+// +// 785c8a400000-785c8aaeb000 r--s 00000000 fc:06 12762267 /home/foo/.../publish/Datadog.Trace.dll +func dotNetDetector(pid int, _ []string, envs map[string]string, _ usm.DetectorContextMap) Instrumentation { if val, ok := envs["CORECLR_ENABLE_PROFILING"]; ok && val == "1" { - foundFlags |= 2 - } - if foundFlags == 3 { return Provided } - ignoreArgs := map[string]bool{ - "build": true, - "clean": true, - "restore": true, - "publish": true, - } - - if len(args) > 1 { - // Ignore only if the first arg match with the ignore list - if ignoreArgs[args[1]] { - return None - } - // Check to see if there's a DLL on the command line that contain the string Datadog.Trace.ClrProfiler.Native - // If so, it's already instrumented with Datadog, ignore the process - for _, v := range args[1:] { - if strings.HasSuffix(v, ".dll") { - if f, ok := findFile(v); ok { - defer f.Close() - offset, err := reader.Index(f, datadogDotNetInstrumented) - if offset != -1 && err == nil { - return Provided - } - } - } - } - } - - // does the binary contain the string Datadog.Trace.ClrProfiler.Native (this should cover all single-file deployments) - // if so, it's already instrumented with Datadog, ignore the process - if f, ok := findFile(args[0]); ok { - defer f.Close() - offset, err := reader.Index(f, datadogDotNetInstrumented) - if offset != -1 && err == nil { - return Provided - } - } - - // check if there's a .dll in the directory with the same name as the binary used to launch it - // if so, check if it has the Datadog.Trace.ClrProfiler.Native string - // if so, it's already instrumented with Datadog, ignore the process - if f, ok := findFile(args[0] + ".dll"); ok { - defer f.Close() - offset, err := reader.Index(f, datadogDotNetInstrumented) - if offset != -1 && err == nil { - return Provided - } + mapsPath := kernel.HostProc(strconv.Itoa(pid), "maps") + mapsFile, err := os.Open(mapsPath) + if err != nil { + return None } + defer mapsFile.Close() - // does the application folder contain the file Datadog.Trace.dll (this should cover "classic" deployments) - // if so, it's already instrumented with Datadog, ignore the process - if f, ok := findFile("Datadog.Trace.dll"); ok { - f.Close() - return Provided - } - return None + return dotNetDetectorFromMapsReader(mapsFile) } diff --git a/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go b/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go index f2977cd14f998..79570d4159deb 100644 --- a/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go +++ b/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go @@ -173,21 +173,98 @@ func Test_pythonDetector(t *testing.T) { } } +func TestDotNetDetector(t *testing.T) { + for _, test := range []struct { + name string + env map[string]string + maps string + result Instrumentation + }{ + { + name: "no env, no maps", + result: None, + }, + { + name: "profiling disabled", + env: map[string]string{ + "CORECLR_ENABLE_PROFILING": "0", + }, + result: None, + }, + { + name: "profiling enabled", + env: map[string]string{ + "CORECLR_ENABLE_PROFILING": "1", + }, + result: Provided, + }, + { + name: "not in maps", + maps: ` +785c8ab24000-785c8ab2c000 r--s 00000000 fc:06 12762114 /home/foo/hello/bin/release/net8.0/linux-x64/publish/System.Diagnostics.StackTrace.dll +785c8ab2c000-785c8acce000 r--s 00000000 fc:06 12762148 /home/foo/hello/bin/release/net8.0/linux-x64/publish/System.Net.Http.dll + `, + result: None, + }, + { + name: "in maps, no env", + maps: ` +785c89c00000-785c8a400000 rw-p 00000000 
00:00 0 +785c8a400000-785c8aaeb000 r--s 00000000 fc:06 12762267 /home/foo/hello/bin/release/net8.0/linux-x64/publish/Datadog.Trace.dll +785c8aaec000-785c8ab0d000 rw-p 00000000 00:00 0 +785c8ab0d000-785c8ab24000 r--s 00000000 fc:06 12761829 /home/foo/hello/bin/release/net8.0/linux-x64/publish/System.Collections.Specialized.dll + `, + result: Provided, + }, + { + name: "in maps, env misleading", + env: map[string]string{ + "CORECLR_ENABLE_PROFILING": "0", + }, + maps: ` +785c8a400000-785c8aaeb000 r--s 00000000 fc:06 12762267 /home/foo/hello/bin/release/net8.0/linux-x64/publish/Datadog.Trace.dll + `, + result: Provided, + }, + } { + t.Run(test.name, func(t *testing.T) { + var result Instrumentation + if test.maps == "" { + result = dotNetDetector(0, nil, test.env, nil) + } else { + result = dotNetDetectorFromMapsReader(strings.NewReader(test.maps)) + } + assert.Equal(t, test.result, result) + }) + } +} + func TestGoDetector(t *testing.T) { curDir, err := testutil.CurDir() require.NoError(t, err) - serverBin, err := usmtestutil.BuildGoBinaryWrapper(filepath.Join(curDir, "testutil"), "instrumented") + serverBinWithSymbols, err := usmtestutil.BuildGoBinaryWrapper(filepath.Join(curDir, "testutil"), "instrumented") + require.NoError(t, err) + serverBinWithoutSymbols, err := usmtestutil.BuildGoBinaryWrapperWithoutSymbols(filepath.Join(curDir, "testutil"), "instrumented") require.NoError(t, err) - cmd := exec.Command(serverBin) - require.NoError(t, cmd.Start()) + cmdWithSymbols := exec.Command(serverBinWithSymbols) + require.NoError(t, cmdWithSymbols.Start()) t.Cleanup(func() { - _ = cmd.Process.Kill() + _ = cmdWithSymbols.Process.Kill() + }) + + cmdWithoutSymbols := exec.Command(serverBinWithoutSymbols) + require.NoError(t, cmdWithoutSymbols.Start()) + t.Cleanup(func() { + _ = cmdWithoutSymbols.Process.Kill() }) result := goDetector(os.Getpid(), nil, nil, nil) - require.Equal(t, result, None) + require.Equal(t, None, result) + + result = goDetector(cmdWithSymbols.Process.Pid, nil, nil, nil) + require.Equal(t, Provided, result) - result = goDetector(cmd.Process.Pid, nil, nil, nil) - require.Equal(t, result, Provided) + result = goDetector(cmdWithoutSymbols.Process.Pid, nil, nil, nil) + require.Equal(t, Provided, result) } diff --git a/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/.gitignore b/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/.gitignore index fbce16df4eb9d..be71b320651da 100644 --- a/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/.gitignore +++ b/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/.gitignore @@ -1 +1,2 @@ instrumented +instrumented-nosymbols diff --git a/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/instrumented.go b/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/instrumented.go index ab82025a4488a..74b92599e66cb 100644 --- a/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/instrumented.go +++ b/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/instrumented.go @@ -8,9 +8,12 @@ package main import ( + "time" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" ) func main() { tracer.Start() + time.Sleep(time.Second * 20) } diff --git a/pkg/collector/corechecks/servicediscovery/events.go b/pkg/collector/corechecks/servicediscovery/events.go index aa02df577a7e9..ae25068e956b0 100644 --- a/pkg/collector/corechecks/servicediscovery/events.go +++ b/pkg/collector/corechecks/servicediscovery/events.go @@ -13,7 +13,7 @@ 
import ( "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -58,7 +58,7 @@ type telemetrySender struct { func (ts *telemetrySender) newEvent(t eventType, svc serviceInfo) *event { host := ts.hostname.GetSafe(context.Background()) - env := pkgconfig.Datadog().GetString("env") + env := pkgconfigsetup.Datadog().GetString("env") nameSource := "" if svc.service.DDService != "" { diff --git a/pkg/collector/corechecks/servicediscovery/impl_linux.go b/pkg/collector/corechecks/servicediscovery/impl_linux.go index 27ef6aa36cc66..5478b38f507d1 100644 --- a/pkg/collector/corechecks/servicediscovery/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/impl_linux.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/servicetype" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" processnet "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -161,6 +161,6 @@ type systemProbeClient interface { func getSysProbeClient() (systemProbeClient, error) { return processnet.GetRemoteSystemProbeUtil( - ddconfig.SystemProbe().GetString("system_probe_config.sysprobe_socket"), + pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket"), ) } diff --git a/pkg/collector/corechecks/servicediscovery/language/language.go b/pkg/collector/corechecks/servicediscovery/language/language.go index dd4b224f9bc47..fdb16329d58d5 100644 --- a/pkg/collector/corechecks/servicediscovery/language/language.go +++ b/pkg/collector/corechecks/servicediscovery/language/language.go @@ -3,20 +3,9 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build linux - // Package language provides functionality to detect the programming language for a given process. package language -import ( - "path/filepath" - - "github.com/DataDog/datadog-agent/pkg/languagedetection" - "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" - "github.com/DataDog/datadog-agent/pkg/languagedetection/privileged" - "github.com/DataDog/datadog-agent/pkg/process/procutil" -) - // Language represents programming languages. type Language string @@ -40,67 +29,3 @@ const ( // PHP represents PHP. PHP Language = "php" ) - -var ( - // languageNameToLanguageMap translates the constants rom the - // languagedetection package to the constants used in this file. The latter - // are shared with the backend, and at least java/jvm differs in the name - // from the languagedetection package. - languageNameToLanguageMap = map[languagemodels.LanguageName]Language{ - languagemodels.Go: Go, - languagemodels.Node: Node, - languagemodels.Dotnet: DotNet, - languagemodels.Python: Python, - languagemodels.Java: Java, - languagemodels.Ruby: Ruby, - } -) - -// ProcessInfo holds information about a process. -type ProcessInfo struct { - Args []string - Envs map[string]string -} - -// FindInArgs tries to detect the language only using the provided command line arguments. 
-func FindInArgs(exe string, args []string) Language { - // empty slice passed in - if len(args) == 0 { - return "" - } - - langs := languagedetection.DetectLanguage([]languagemodels.Process{&procutil.Process{ - // Pid doesn't matter since sysprobeConfig is nil - Pid: 0, - Cmdline: args, - Comm: filepath.Base(exe), - }}, nil) - if len(langs) == 0 { - return "" - } - - lang := langs[0] - if lang == nil { - return "" - } - if outLang, ok := languageNameToLanguageMap[lang.Name]; ok { - return outLang - } - - return "" -} - -// FindUsingPrivilegedDetector tries to detect the language using the provided command line arguments -func FindUsingPrivilegedDetector(detector privileged.LanguageDetector, pid int32) Language { - langs := detector.DetectWithPrivileges([]languagemodels.Process{&procutil.Process{Pid: pid}}) - if len(langs) == 0 { - return "" - } - - lang := langs[0] - if outLang, ok := languageNameToLanguageMap[lang.Name]; ok { - return outLang - } - - return "" -} diff --git a/pkg/collector/corechecks/servicediscovery/language/language_linux.go b/pkg/collector/corechecks/servicediscovery/language/language_linux.go new file mode 100644 index 0000000000000..4149070faf06d --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/language/language_linux.go @@ -0,0 +1,79 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux + +package language + +import ( + "path/filepath" + + "github.com/DataDog/datadog-agent/pkg/languagedetection" + "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" + "github.com/DataDog/datadog-agent/pkg/languagedetection/privileged" + "github.com/DataDog/datadog-agent/pkg/process/procutil" +) + +// languageNameToLanguageMap translates the constants rom the +// languagedetection package to the constants used in this file. The latter +// are shared with the backend, and at least java/jvm differs in the name +// from the languagedetection package. +var languageNameToLanguageMap = map[languagemodels.LanguageName]Language{ + languagemodels.Go: Go, + languagemodels.Node: Node, + languagemodels.Dotnet: DotNet, + languagemodels.Python: Python, + languagemodels.Java: Java, + languagemodels.Ruby: Ruby, +} + +// ProcessInfo holds information about a process. +type ProcessInfo struct { + Args []string + Envs map[string]string +} + +// FindInArgs tries to detect the language only using the provided command line arguments. 
+func FindInArgs(exe string, args []string) Language { + // empty slice passed in + if len(args) == 0 { + return "" + } + + langs := languagedetection.DetectLanguage([]languagemodels.Process{&procutil.Process{ + // Pid doesn't matter since sysprobeConfig is nil + Pid: 0, + Cmdline: args, + Comm: filepath.Base(exe), + }}, nil) + if len(langs) == 0 { + return "" + } + + lang := langs[0] + if lang == nil { + return "" + } + if outLang, ok := languageNameToLanguageMap[lang.Name]; ok { + return outLang + } + + return "" +} + +// FindUsingPrivilegedDetector tries to detect the language using the provided command line arguments +func FindUsingPrivilegedDetector(detector privileged.LanguageDetector, pid int32) Language { + langs := detector.DetectWithPrivileges([]languagemodels.Process{&procutil.Process{Pid: pid}}) + if len(langs) == 0 { + return "" + } + + lang := langs[0] + if outLang, ok := languageNameToLanguageMap[lang.Name]; ok { + return outLang + } + + return "" +} diff --git a/pkg/collector/corechecks/servicediscovery/module/config.go b/pkg/collector/corechecks/servicediscovery/module/config.go new file mode 100644 index 0000000000000..1cf73abf465fd --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/module/config.go @@ -0,0 +1,35 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux + +package module + +import ( + "strings" + "time" + + sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" + ddconfig "github.com/DataDog/datadog-agent/pkg/config/setup" +) + +const discoveryNS = "discovery" + +type discoveryConfig struct { + cpuUsageUpdateDelay time.Duration +} + +func newConfig() *discoveryConfig { + cfg := ddconfig.SystemProbe() + sysconfig.Adjust(cfg) + + return &discoveryConfig{ + cpuUsageUpdateDelay: cfg.GetDuration(join(discoveryNS, "cpu_usage_update_delay")), + } +} + +func join(pieces ...string) string { + return strings.Join(pieces, ".") +} diff --git a/pkg/collector/corechecks/servicediscovery/module/envs.go b/pkg/collector/corechecks/servicediscovery/module/envs.go index 08f5c54994565..5ae6ebf797e96 100644 --- a/pkg/collector/corechecks/servicediscovery/module/envs.go +++ b/pkg/collector/corechecks/servicediscovery/module/envs.go @@ -15,24 +15,40 @@ import ( "strings" "github.com/DataDog/datadog-agent/pkg/util/kernel" + "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/shirou/gopsutil/v3/process" ) const ( - // injectorMemFdName is the name the injector (Datadog/auto_inject) uses. - injectorMemFdName = "dd_environ" - injectorMemFdPath = "/memfd:" + injectorMemFdName + " (deleted)" + injectorMemFdName = "dd_process_inject_info.msgpack" + injectorMemFdPath = "/memfd:" + injectorMemFdName // memFdMaxSize is used to limit the amount of data we read from the memfd. // This is for safety to limit our memory usage in the case of a corrupt // file. - memFdMaxSize = 4096 + // matches limit in the [auto injector](https://github.com/DataDog/auto_inject/blob/5ae819d01d8625c24dcf45b8fef32a7d94927d13/librouter.c#L52) + memFdMaxSize = 65536 ) -// readEnvsFile reads the env file created by the auto injector. The file -// contains the variables in a format similar to /proc/$PID/environ: ENV=VAL, -// separated by \000. 
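The new module/config.go above only reads discovery.cpu_usage_update_delay; the code that consumes it falls outside this excerpt, but the lastCPUTimeUpdate field added to the discovery struct later in this section suggests a plain rate limit on recomputing CPU usage. A hedged sketch of that pattern; the cpuSampler and maybeRefresh names are invented for illustration:

// Sketch, not part of the diff: a rate-limit pattern consistent with the
// cpuUsageUpdateDelay setting and the lastCPUTimeUpdate field; names are illustrative.
package main

import (
	"fmt"
	"time"
)

type cpuSampler struct {
	updateDelay time.Duration // discovery.cpu_usage_update_delay
	lastUpdate  time.Time
}

func (s *cpuSampler) maybeRefresh(refresh func()) {
	if time.Since(s.lastUpdate) < s.updateDelay {
		return // too soon, keep the cached CPU times
	}
	refresh()
	s.lastUpdate = time.Now()
}

func main() {
	s := &cpuSampler{updateDelay: 60 * time.Second}
	s.maybeRefresh(func() { fmt.Println("recomputing per-service CPU usage") })
}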
-func readEnvsFile(path string) ([]string, error) { +// getInjectionMeta gets metadata from auto injector injection, if +// present. The auto injector creates a memfd file where it writes +// injection metadata such as injected environment variables, or versions +// of the auto injector and the library. +func getInjectionMeta(proc *process.Process) (*InjectedProcess, bool) { + path, found := findInjectorFile(proc) + if !found { + return nil, false + } + injectionMeta, err := extractInjectionMeta(path) + if err != nil { + log.Warnf("failed extracting injected envs: %s", err) + return nil, false + } + return injectionMeta, true + +} + +func extractInjectionMeta(path string) (*InjectedProcess, error) { reader, err := os.Open(path) if err != nil { return nil, err @@ -43,13 +59,19 @@ func readEnvsFile(path string) ([]string, error) { if err != nil { return nil, err } + if len(data) == memFdMaxSize { + return nil, io.ErrShortBuffer + } - return strings.Split(string(data), "\000"), nil + var injectedProc InjectedProcess + if _, err = injectedProc.UnmarshalMsg(data); err != nil { + return nil, err + } + return &injectedProc, nil } -// getInjectedEnvs gets environment variables injected by the auto injector, if -// present. The auto injector creates a memfd file with a specific name into which -// it writes the environment variables. In order to find the correct file, we +// findInjectorFile searches for the injector file in the process open file descriptors. +// In order to find the correct file, we // need to iterate the list of files (named after file descriptor numbers) in // /proc/$PID/fd and get the name from the target of the symbolic link. // @@ -59,57 +81,75 @@ func readEnvsFile(path string) ([]string, error) { // lrwx------ 1 foo foo 64 Aug 13 14:24 0 -> /dev/pts/6 // lrwx------ 1 foo foo 64 Aug 13 14:24 1 -> /dev/pts/6 // lrwx------ 1 foo foo 64 Aug 13 14:24 2 -> /dev/pts/6 -// lrwx------ 1 foo foo 64 Aug 13 14:24 3 -> '/memfd:dd_environ (deleted)' +// lrwx------ 1 foo foo 64 Aug 13 14:24 3 -> '/dd_process_inject_info.msgpac (deleted)' // ``` -func getInjectedEnvs(proc *process.Process) []string { +func findInjectorFile(proc *process.Process) (string, bool) { fdsPath := kernel.HostProc(strconv.Itoa(int(proc.Pid)), "fd") - entries, err := os.ReadDir(fdsPath) + // quick path, the shadow file is the first opened file by the process + // unless there are inherited fds + path := filepath.Join(fdsPath, "3") + if isInjectorFile(path) { + return path, true + } + fdDir, err := os.Open(fdsPath) if err != nil { - return nil + log.Warnf("failed to open %s: %s", fdsPath, err) + return "", false } - - for _, entry := range entries { - path := filepath.Join(fdsPath, entry.Name()) - name, err := os.Readlink(path) - if err != nil { - continue - } - - if name != injectorMemFdPath { + defer fdDir.Close() + fds, err := fdDir.Readdirnames(-1) + if err != nil { + log.Warnf("failed to read %s: %s", fdsPath, err) + return "", false + } + for _, fd := range fds { + switch fd { + case "0", "1", "2", "3": continue + default: + path := filepath.Join(fdsPath, fd) + if isInjectorFile(path) { + return path, true + } } - - envs, _ := readEnvsFile(path) - return envs } + return "", false +} - return nil +func isInjectorFile(path string) bool { + name, err := os.Readlink(path) + if err != nil { + return false + } + return strings.HasPrefix(name, injectorMemFdPath) } -// envsToMap splits a list of strings containing environment variables of the +// addEnvToMap splits a list of strings containing environment variables of 
the // format NAME=VAL to a map. -func envsToMap(envs ...string) map[string]string { - envMap := make(map[string]string, len(envs)) - for _, env := range envs { - name, val, found := strings.Cut(env, "=") - if !found { - continue - } - - envMap[name] = val +func addEnvToMap(env string, envs map[string]string) { + name, val, found := strings.Cut(env, "=") + if found { + envs[name] = val } - - return envMap } // getEnvs gets the environment variables for the process, both the initial // ones, and if present, the ones injected via the auto injector. func getEnvs(proc *process.Process) (map[string]string, error) { - envs, err := proc.Environ() + procEnvs, err := proc.Environ() if err != nil { return nil, err } - - envs = append(envs, getInjectedEnvs(proc)...) - return envsToMap(envs...), nil + envs := make(map[string]string, len(procEnvs)) + for _, env := range procEnvs { + addEnvToMap(env, envs) + } + injectionMeta, ok := getInjectionMeta(proc) + if !ok { + return envs, nil + } + for _, env := range injectionMeta.InjectedEnv { + addEnvToMap(string(env), envs) + } + return envs, nil } diff --git a/pkg/collector/corechecks/servicediscovery/module/envs_test.go b/pkg/collector/corechecks/servicediscovery/module/envs_test.go index 7ef168b963f67..9a56755c097b6 100644 --- a/pkg/collector/corechecks/servicediscovery/module/envs_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/envs_test.go @@ -8,7 +8,6 @@ package module import ( - "bytes" "fmt" "os" "strings" @@ -23,8 +22,9 @@ func TestInjectedEnvBasic(t *testing.T) { curPid := os.Getpid() proc, err := process.NewProcess(int32(curPid)) require.NoError(t, err) - envs := getInjectedEnvs(proc) - require.Nil(t, envs) + injectionMeta, ok := getInjectionMeta(proc) + require.Nil(t, injectionMeta) + require.False(t, ok) // Provide an injected replacement for some already-present env variable first := os.Environ()[0] @@ -49,12 +49,10 @@ func TestInjectedEnvLimit(t *testing.T) { full := []string{env} createEnvsMemfd(t, full) - expected := []string{full[0][:memFdMaxSize]} - proc, err := process.NewProcess(int32(os.Getpid())) require.NoError(t, err) - envs := getInjectedEnvs(proc) - require.Equal(t, expected, envs) + _, ok := getInjectionMeta(proc) + require.False(t, ok) } // createEnvsMemfd creates an memfd in the current process with the specified @@ -62,16 +60,14 @@ func TestInjectedEnvLimit(t *testing.T) { func createEnvsMemfd(t *testing.T, envs []string) { t.Helper() - var b bytes.Buffer + var injectionMeta InjectedProcess for _, env := range envs { - _, err := b.WriteString(env) - require.NoError(t, err) - - err = b.WriteByte(0) - require.NoError(t, err) + injectionMeta.InjectedEnv = append(injectionMeta.InjectedEnv, []byte(env)) } + encodedInjectionMeta, err := injectionMeta.MarshalMsg(nil) + require.NoError(t, err) - memfd, err := memfile(injectorMemFdName, b.Bytes()) + memfd, err := memfile(injectorMemFdName, encodedInjectionMeta) require.NoError(t, err) t.Cleanup(func() { unix.Close(memfd) }) } diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go index 7d7237c38484b..9646bb4d31f28 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go @@ -7,15 +7,18 @@ package module import ( "bufio" + "cmp" "errors" "fmt" "io" "net/http" "os" "path/filepath" + "slices" "strconv" "strings" "sync" + "time" "github.com/shirou/gopsutil/v3/process" @@ -28,6 +31,7 @@ import ( 
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/usm" "github.com/DataDog/datadog-agent/pkg/languagedetection/privileged" + "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/process/procutil" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -51,10 +55,13 @@ type serviceInfo struct { cmdLine []string startTimeSecs uint64 cpuTime uint64 + cpuUsage float64 } // discovery is an implementation of the Module interface for the discovery module. type discovery struct { + config *discoveryConfig + mux *sync.RWMutex // cache maps pids to data that should be cached between calls to the endpoint. cache map[int32]*serviceInfo @@ -68,11 +75,15 @@ type discovery struct { // lastGlobalCPUTime stores the total cpu time of the system from the last time // the endpoint was called. lastGlobalCPUTime uint64 + + // lastCPUTimeUpdate is the last time lastGlobalCPUTime was updated. + lastCPUTimeUpdate time.Time } // NewDiscoveryModule creates a new discovery system probe module. func NewDiscoveryModule(*sysconfigtypes.Config, module.FactoryDependencies) (module.Module, error) { return &discovery{ + config: newConfig(), mux: &sync.RWMutex{}, cache: make(map[int32]*serviceInfo), privilegedDetector: privileged.NewLanguageDetector(), @@ -132,7 +143,6 @@ func getSockets(pid int32) ([]uint64, error) { } defer d.Close() fnames, err := d.Readdirnames(-1) - if err != nil { return nil, err } @@ -230,7 +240,7 @@ func parseNetIPSocketLine(fields []string, expectedState uint64) (uint64, uint16 // newNetIPSocket reads the content of the provided file and returns a map of socket inodes to ports. // Based on newNetIPSocket() in net_ip_socket.go from github.com/prometheus/procfs -func newNetIPSocket(file string, expectedState uint64) (map[uint64]uint16, error) { +func newNetIPSocket(file string, expectedState uint64, shouldIgnore func(uint16) bool) (map[uint64]uint16, error) { f, err := os.Open(file) if err != nil { return nil, err @@ -248,6 +258,11 @@ func newNetIPSocket(file string, expectedState uint64) (map[uint64]uint16, error if err != nil { continue } + + if shouldIgnore != nil && shouldIgnore(port) { + continue + } + netIPSocket[inode] = port } if err := s.Err(); err != nil { @@ -260,19 +275,31 @@ func newNetIPSocket(file string, expectedState uint64) (map[uint64]uint16, error // protocols for the provided namespace. Based on snapshotBoundSockets() in // pkg/security/security_profile/activity_tree/process_node_snapshot.go. func getNsInfo(pid int) (*namespaceInfo, error) { - tcp, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/tcp", pid)), tcpListen) + // Don't ignore ephemeral ports on TCP, unlike on UDP (see below). + var noIgnore func(uint16) bool + tcp, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/tcp", pid)), tcpListen, noIgnore) if err != nil { log.Debugf("couldn't snapshot TCP sockets: %v", err) } - udp, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/udp", pid)), udpListen) + udp, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/udp", pid)), udpListen, + func(port uint16) bool { + // As in NPM (see initializePortBind() in + // pkg/network/tracer/connection): Ignore ephemeral port binds on + // UDP as they are more likely to be from clients calling bind with + // port 0. 
+ return network.IsPortInEphemeralRange(network.AFINET, network.UDP, port) == network.EphemeralTrue + }) if err != nil { log.Debugf("couldn't snapshot UDP sockets: %v", err) } - tcpv6, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/tcp6", pid)), tcpListen) + tcpv6, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/tcp6", pid)), tcpListen, noIgnore) if err != nil { log.Debugf("couldn't snapshot TCP6 sockets: %v", err) } - udpv6, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/udp6", pid)), udpListen) + udpv6, err := newNetIPSocket(kernel.HostProc(fmt.Sprintf("%d/net/udp6", pid)), udpListen, + func(port uint16) bool { + return network.IsPortInEphemeralRange(network.AFINET6, network.UDP, port) == network.EphemeralTrue + }) if err != nil { log.Debugf("couldn't snapshot UDP6 sockets: %v", err) } @@ -293,9 +320,8 @@ func getNsInfo(pid int) (*namespaceInfo, error) { // parsingContext holds temporary context not preserved between invocations of // the endpoint. type parsingContext struct { - procRoot string - netNsInfo map[uint32]*namespaceInfo - globalCPUTime uint64 + procRoot string + netNsInfo map[uint32]*namespaceInfo } // getServiceInfo gets the service information for a process using the @@ -324,11 +350,11 @@ func (s *discovery) getServiceInfo(proc *process.Process) (*serviceInfo, error) contextMap := make(usm.DetectorContextMap) root := kernel.HostProc(strconv.Itoa(int(proc.Pid)), "root") - nameMeta := servicediscovery.GetServiceName(cmdline, envs, root, contextMap) lang := language.FindInArgs(exe, cmdline) if lang == "" { lang = language.FindUsingPrivilegedDetector(s.privilegedDetector, proc.Pid) } + nameMeta := servicediscovery.GetServiceName(cmdline, envs, root, lang, contextMap) apmInstrumentation := apm.Detect(int(proc.Pid), cmdline, envs, lang, contextMap) return &serviceInfo{ @@ -372,6 +398,10 @@ var ignoreComms = map[string]struct{}{ "docker-proxy": {}, } +// maxNumberOfPorts is the maximum number of listening ports which we report per +// service. +const maxNumberOfPorts = 50 + // getService gets information for a single service. func (s *discovery) getService(context parsingContext, pid int32) *model.Service { proc, err := customNewProcess(pid) @@ -433,6 +463,16 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service return nil } + if len(ports) > maxNumberOfPorts { + // Sort the list so that non-ephemeral ports are given preference when + // we trim the list. + portCmp := func(a, b uint16) int { + return cmp.Compare(a, b) + } + slices.SortFunc(ports, portCmp) + ports = ports[:maxNumberOfPorts] + } + rss, err := getRSS(proc) if err != nil { return nil @@ -460,11 +500,6 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service name = info.generatedName } - cpu, err := updateCPUCoresStats(proc, info, s.lastGlobalCPUTime, context.globalCPUTime) - if err != nil { - return nil - } - return &model.Service{ PID: int(pid), Name: name, @@ -477,7 +512,7 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service RSS: rss, CommandLine: info.cmdLine, StartTimeSecs: info.startTimeSecs, - CPUCores: cpu, + CPUCores: info.cpuUsage, } } @@ -496,6 +531,38 @@ func (s *discovery) cleanCache(alivePids map[int32]struct{}) { } } +// updateServicesCPUStats updates the CPU stats of cached services, as well as the +// global CPU time cache for future updates. 
+func (s *discovery) updateServicesCPUStats(services []model.Service) error { + s.mux.Lock() + defer s.mux.Unlock() + + if time.Since(s.lastCPUTimeUpdate) < s.config.cpuUsageUpdateDelay { + return nil + } + + globalCPUTime, err := getGlobalCPUTime() + if err != nil { + return fmt.Errorf("could not get global CPU time: %w", err) + } + + for i := range services { + service := &services[i] + serviceInfo, ok := s.cache[int32(service.PID)] + if !ok { + continue + } + + _ = updateCPUCoresStats(service.PID, serviceInfo, s.lastGlobalCPUTime, globalCPUTime) + service.CPUCores = serviceInfo.cpuUsage + } + + s.lastGlobalCPUTime = globalCPUTime + s.lastCPUTimeUpdate = time.Now() + + return nil +} + // getStatus returns the list of currently running services. func (s *discovery) getServices() (*[]model.Service, error) { procRoot := kernel.ProcFSRoot() @@ -504,15 +571,9 @@ func (s *discovery) getServices() (*[]model.Service, error) { return nil, err } - globalCPUTime, err := getGlobalCPUTime() - if err != nil { - return nil, err - } - context := parsingContext{ - procRoot: procRoot, - netNsInfo: make(map[uint32]*namespaceInfo), - globalCPUTime: globalCPUTime, + procRoot: procRoot, + netNsInfo: make(map[uint32]*namespaceInfo), } var services []model.Service @@ -530,7 +591,10 @@ func (s *discovery) getServices() (*[]model.Service, error) { } s.cleanCache(alivePids) - s.lastGlobalCPUTime = context.globalCPUTime + + if err = s.updateServicesCPUStats(services); err != nil { + return nil, err + } return &services, nil } diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go index 7997723188262..a88c31590efa1 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go @@ -20,6 +20,7 @@ import ( "path/filepath" "regexp" "runtime" + "slices" "strconv" "strings" "syscall" @@ -44,6 +45,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/apm" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" + "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" protocolUtils "github.com/DataDog/datadog-agent/pkg/network/protocols/testutil" "github.com/DataDog/datadog-agent/pkg/network/protocols/tls/nodejs" @@ -70,7 +72,15 @@ func setupDiscoveryModule(t *testing.T) string { m := module.Factory{ Name: config.DiscoveryModule, ConfigNamespaces: []string{"discovery"}, - Fn: NewDiscoveryModule, + Fn: func(cfg *types.Config, deps module.FactoryDependencies) (module.Module, error) { + module, err := NewDiscoveryModule(cfg, deps) + if err != nil { + return nil, err + } + + module.(*discovery).config.cpuUsageUpdateDelay = time.Second + return module, nil + }, NeedsEBPF: func() bool { return false }, @@ -110,8 +120,8 @@ func getServicesMap(t *testing.T, url string) map[int]model.Service { return servicesMap } -func startTCPServer(t *testing.T, proto string) (*os.File, *net.TCPAddr) { - listener, err := net.Listen(proto, "") +func startTCPServer(t *testing.T, proto string, address string) (*os.File, *net.TCPAddr) { + listener, err := net.Listen(proto, address) require.NoError(t, err) t.Cleanup(func() { _ = listener.Close() }) tcpAddr := listener.Addr().(*net.TCPAddr) @@ -135,8 +145,8 @@ func startTCPClient(t *testing.T, proto string, 
server *net.TCPAddr) (*os.File, return f, client.LocalAddr().(*net.TCPAddr) } -func startUDPServer(t *testing.T, proto string) (*os.File, *net.UDPAddr) { - lnPacket, err := net.ListenPacket(proto, "") +func startUDPServer(t *testing.T, proto string, address string) (*os.File, *net.UDPAddr) { + lnPacket, err := net.ListenPacket(proto, address) require.NoError(t, err) t.Cleanup(func() { _ = lnPacket.Close() }) @@ -189,7 +199,7 @@ func TestBasic(t *testing.T) { expectedPorts := make(map[int]int) var startTCP = func(proto string) { - f, server := startTCPServer(t, proto) + f, server := startTCPServer(t, proto, "") cmd := startProcessWithFile(t, f) expectedPIDs = append(expectedPIDs, cmd.Process.Pid) expectedPorts[cmd.Process.Pid] = server.Port @@ -200,7 +210,7 @@ func TestBasic(t *testing.T) { } var startUDP = func(proto string) { - f, server := startUDPServer(t, proto) + f, server := startUDPServer(t, proto, ":8083") cmd := startProcessWithFile(t, f) expectedPIDs = append(expectedPIDs, cmd.Process.Pid) expectedPorts[cmd.Process.Pid] = server.Port @@ -241,7 +251,7 @@ func TestPorts(t *testing.T) { var unexpectedPorts []uint16 var startTCP = func(proto string) { - serverf, server := startTCPServer(t, proto) + serverf, server := startTCPServer(t, proto, "") t.Cleanup(func() { serverf.Close() }) clientf, client := startTCPClient(t, proto, server) t.Cleanup(func() { clientf.Close() }) @@ -251,13 +261,17 @@ func TestPorts(t *testing.T) { } var startUDP = func(proto string) { - serverf, server := startUDPServer(t, proto) + serverf, server := startUDPServer(t, proto, ":8083") t.Cleanup(func() { _ = serverf.Close() }) clientf, client := startUDPClient(t, proto, server) t.Cleanup(func() { clientf.Close() }) expectedPorts = append(expectedPorts, uint16(server.Port)) unexpectedPorts = append(unexpectedPorts, uint16(client.Port)) + + ephemeralf, ephemeral := startUDPServer(t, proto, "") + t.Cleanup(func() { _ = ephemeralf.Close() }) + unexpectedPorts = append(unexpectedPorts, uint16(ephemeral.Port)) } startTCP("tcp4") @@ -276,6 +290,40 @@ func TestPorts(t *testing.T) { } } +func TestPortsLimits(t *testing.T) { + url := setupDiscoveryModule(t) + + var expectedPorts []int + + var openPort = func(address string) { + serverf, server := startTCPServer(t, "tcp4", address) + t.Cleanup(func() { serverf.Close() }) + + expectedPorts = append(expectedPorts, server.Port) + } + + openPort("127.0.0.1:8081") + + for i := 0; i < maxNumberOfPorts; i++ { + openPort("") + } + + openPort("127.0.0.1:8082") + + slices.Sort(expectedPorts) + + serviceMap := getServicesMap(t, url) + pid := os.Getpid() + require.Contains(t, serviceMap, pid) + ports := serviceMap[pid].Ports + assert.Contains(t, ports, uint16(8081)) + assert.Contains(t, ports, uint16(8082)) + assert.Len(t, ports, maxNumberOfPorts) + for i := 0; i < maxNumberOfPorts-2; i++ { + assert.Contains(t, ports, uint16(expectedPorts[i])) + } +} + func TestServiceName(t *testing.T) { url := setupDiscoveryModule(t) @@ -295,7 +343,7 @@ func TestServiceName(t *testing.T) { cmd := exec.CommandContext(ctx, "sleep", "1000") cmd.Dir = "/tmp/" cmd.Env = append(cmd.Env, "OTHER_ENV=test") - cmd.Env = append(cmd.Env, "DD_SERVICE=foobar") + cmd.Env = append(cmd.Env, "DD_SERVICE=foo😀bar") cmd.Env = append(cmd.Env, "YET_OTHER_ENV=test") err = cmd.Start() require.NoError(t, err) @@ -306,7 +354,8 @@ func TestServiceName(t *testing.T) { require.EventuallyWithT(t, func(collect *assert.CollectT) { portMap := getServicesMap(t, url) assert.Contains(collect, portMap, pid) - assert.Equal(t, "foobar", 
portMap[pid].DDService) + // Non-ASCII character removed due to normalization. + assert.Equal(t, "foo_bar", portMap[pid].DDService) assert.Equal(t, portMap[pid].DDService, portMap[pid].Name) assert.Equal(t, "sleep", portMap[pid].GeneratedName) assert.False(t, portMap[pid].DDServiceInjected) @@ -378,7 +427,7 @@ func buildFakeServer(t *testing.T) string { serverBin, err := usmtestutil.BuildGoBinaryWrapper(filepath.Join(curDir, "testutil"), "fake_server") require.NoError(t, err) - for _, alias := range []string{"java", "node", "sshd"} { + for _, alias := range []string{"java", "node", "sshd", "dotnet"} { makeAlias(t, alias, serverBin) } @@ -452,7 +501,15 @@ func TestAPMInstrumentationProvided(t *testing.T) { testCases := map[string]struct { commandline []string // The command line of the fake server language language.Language + env []string }{ + "dotnet": { + commandline: []string{"dotnet", "foo.dll"}, + language: language.DotNet, + env: []string{ + "CORECLR_ENABLE_PROFILING=1", + }, + }, "java": { commandline: []string{"java", "-javaagent:/path/to/dd-java-agent.jar", "-jar", "foo.jar"}, language: language.Java, @@ -473,6 +530,7 @@ func TestAPMInstrumentationProvided(t *testing.T) { bin := filepath.Join(serverDir, test.commandline[0]) cmd := exec.CommandContext(ctx, bin, test.commandline[1:]...) + cmd.Env = append(cmd.Env, test.env...) err := cmd.Start() require.NoError(t, err) @@ -510,7 +568,19 @@ func assertStat(t assert.TestingT, svc model.Service) { return } - assert.Equal(t, uint64(createTimeMs/1000), svc.StartTimeSecs) + // The value returned by proc.CreateTime() can vary between invocations + // since the BootTime (used internally in proc.CreateTime()) can vary when + // the version of BootTimeWithContext which uses /proc/uptime is active in + // gopsutil (either on Docker, or even outside of it due to a bug fixed in + // v4.24.8: + // https://github.com/shirou/gopsutil/commit/aa0b73dc6d5669de5bc9483c0655b1f9446317a9). + // + // This is due to an inherent race since the code in BootTimeWithContext + // substracts the uptime of the host from the current time, and there can be + // in theory an unbounded amount of time between the read of /proc/uptime + // and the retrieval of the current time. Allow a 10 second diff as a + // reasonable value. + assert.InDelta(t, uint64(createTimeMs/1000), svc.StartTimeSecs, 10) } func assertCPU(t *testing.T, url string, pid int) { @@ -525,7 +595,9 @@ func assertCPU(t *testing.T, url string, pid int) { // Calling getServicesMap a second time us the CPU usage percentage since the last call, which should be close to gopsutil value. portMap := getServicesMap(t, url) assert.Contains(t, portMap, pid) - assert.InDelta(t, referenceValue, portMap[pid].CPUCores, 0.10) + // gopsutil reports a percentage, while we are reporting a float between 0 and $(nproc), + // so we convert our value to a percentage. + assert.InDelta(t, referenceValue, portMap[pid].CPUCores*100, 10) } func TestCommandLineSanitization(t *testing.T) { @@ -602,7 +674,8 @@ func TestNodeDocker(t *testing.T) { require.EventuallyWithT(t, func(collect *assert.CollectT) { svcMap := getServicesMap(t, url) assert.Contains(collect, svcMap, pid) - assert.Equal(collect, "nodejs-https-server", svcMap[pid].GeneratedName) + // test@... changed to test_... due to normalization. 
+ assert.Equal(collect, "test_nodejs-https-server", svcMap[pid].GeneratedName) assert.Equal(collect, svcMap[pid].GeneratedName, svcMap[pid].Name) assert.Equal(collect, "provided", svcMap[pid].APMInstrumentation) assertStat(collect, svcMap[pid]) @@ -610,40 +683,60 @@ func TestNodeDocker(t *testing.T) { }, 30*time.Second, 100*time.Millisecond) } -func TestAPMInstrumentationProvidedPython(t *testing.T) { +func TestAPMInstrumentationProvidedWithMaps(t *testing.T) { curDir, err := testutil.CurDir() require.NoError(t, err) - fmapper := fileopener.BuildFmapper(t) - fakePython := makeAlias(t, "python", fmapper) - - // We need the process to map something in a directory called - // "site-packages/ddtrace". The actual mapped file does not matter. - ddtrace := filepath.Join(curDir, "..", "..", "..", "..", "network", "usm", "testdata", "site-packages", "ddtrace") - lib := filepath.Join(ddtrace, fmt.Sprintf("libssl.so.%s", runtime.GOARCH)) + for _, test := range []struct { + alias string + lib string + language language.Language + }{ + { + alias: "python", + // We need the process to map something in a directory called + // "site-packages/ddtrace". The actual mapped file does not matter. + lib: filepath.Join(curDir, + "..", "..", "..", "..", + "network", "usm", "testdata", + "site-packages", "ddtrace", + fmt.Sprintf("libssl.so.%s", runtime.GOARCH)), + language: language.Python, + }, + { + alias: "dotnet", + lib: filepath.Join(curDir, "testdata", "Datadog.Trace.dll"), + language: language.DotNet, + }, + } { + t.Run(test.alias, func(t *testing.T) { + fmapper := fileopener.BuildFmapper(t) + fake := makeAlias(t, test.alias, fmapper) - // Give the process a listening socket - listener, err := net.Listen("tcp", "") - require.NoError(t, err) - f, err := listener.(*net.TCPListener).File() - listener.Close() - require.NoError(t, err) - t.Cleanup(func() { f.Close() }) - disableCloseOnExec(t, f) + // Give the process a listening socket + listener, err := net.Listen("tcp", "") + require.NoError(t, err) + f, err := listener.(*net.TCPListener).File() + listener.Close() + require.NoError(t, err) + t.Cleanup(func() { f.Close() }) + disableCloseOnExec(t, f) - cmd, err := fileopener.OpenFromProcess(t, fakePython, lib) - require.NoError(t, err) + cmd, err := fileopener.OpenFromProcess(t, fake, test.lib) + require.NoError(t, err) - url := setupDiscoveryModule(t) + url := setupDiscoveryModule(t) - pid := cmd.Process.Pid - require.EventuallyWithT(t, func(collect *assert.CollectT) { - portMap := getServicesMap(t, url) - assert.Contains(collect, portMap, pid) - assert.Equal(collect, string(language.Python), portMap[pid].Language) - assert.Equal(collect, string(apm.Provided), portMap[pid].APMInstrumentation) - assertStat(collect, portMap[pid]) - }, 30*time.Second, 100*time.Millisecond) + pid := cmd.Process.Pid + require.EventuallyWithT(t, func(collect *assert.CollectT) { + portMap := getServicesMap(t, url) + assert.Contains(collect, portMap, pid) + assert.Equal(collect, string(test.language), portMap[pid].Language) + assert.Equal(collect, string(apm.Provided), portMap[pid].APMInstrumentation) + assertStat(collect, portMap[pid]) + }, 30*time.Second, 100*time.Millisecond) + }) + } } // Check that we can get listening processes in other namespaces. 
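// A rough sketch (not part of the patch) of the CPUCores arithmetic exercised
// by assertCPU above and implemented in updateCPUCoresStats: the module reports
// a fraction of the machine (0 to NumCPU cores), while gopsutil reports a
// percentage, hence the CPUCores*100 comparison. The numbers below are made up
// purely for illustration:
//
//	processTicksDelta := 50.0   // user+system ticks used by the service since the last poll
//	globalTicksDelta := 1000.0  // ticks used by the whole machine over the same window
//	numCPU := 8.0
//	cores := processTicksDelta / globalTicksDelta * numCPU // 0.4 cores
//	percent := cores * 100                                 // 40, comparable to gopsutil's value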
@@ -758,7 +851,7 @@ func TestCache(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(func() { cancel() }) - f, _ := startTCPServer(t, "tcp4") + f, _ := startTCPServer(t, "tcp4", "") defer f.Close() disableCloseOnExec(t, f) @@ -914,12 +1007,17 @@ func BenchmarkOldGetSockets(b *testing.B) { } // addSockets adds only listening sockets to a map to be used for later looksups. -func addSockets[P procfs.NetTCP | procfs.NetUDP](sockMap map[uint64]socketInfo, sockets P, state uint64) { +func addSockets[P procfs.NetTCP | procfs.NetUDP](sockMap map[uint64]socketInfo, sockets P, + family network.ConnectionFamily, ctype network.ConnectionType, state uint64) { for _, sock := range sockets { if sock.St != state { continue } - sockMap[sock.Inode] = socketInfo{port: uint16(sock.LocalPort)} + port := uint16(sock.LocalPort) + if state == udpListen && network.IsPortInEphemeralRange(family, ctype, port) == network.EphemeralTrue { + continue + } + sockMap[sock.Inode] = socketInfo{port: port} } } @@ -937,10 +1035,10 @@ func getNsInfoOld(pid int) (*namespaceInfo, error) { listeningSockets := make(map[uint64]socketInfo) - addSockets(listeningSockets, TCP, tcpListen) - addSockets(listeningSockets, TCP6, tcpListen) - addSockets(listeningSockets, UDP, udpListen) - addSockets(listeningSockets, UDP6, udpListen) + addSockets(listeningSockets, TCP, network.AFINET, network.TCP, tcpListen) + addSockets(listeningSockets, TCP6, network.AFINET6, network.TCP, tcpListen) + addSockets(listeningSockets, UDP, network.AFINET, network.UDP, udpListen) + addSockets(listeningSockets, UDP6, network.AFINET6, network.UDP, udpListen) return &namespaceInfo{ listeningSockets: listeningSockets, diff --git a/pkg/collector/corechecks/servicediscovery/module/injected_process.go b/pkg/collector/corechecks/servicediscovery/module/injected_process.go new file mode 100644 index 0000000000000..5c99b0775e5ee --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/module/injected_process.go @@ -0,0 +1,18 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:generate go run github.com/tinylib/msgp -io=false + +package module + +// InjectedProcess represents the data injected by the auto-injector into the +// process. +type InjectedProcess struct { + LocalHostname string `msgp:"local_hostname"` + InjectedEnv [][]byte `msgp:"injected_envs"` + LanguageName string `msgp:"language_name"` + TracerVersion string `msgp:"tracer_version"` + InjectorVersion string `msgp:"injector_version"` +} diff --git a/pkg/collector/corechecks/servicediscovery/module/injected_process_gen.go b/pkg/collector/corechecks/servicediscovery/module/injected_process_gen.go new file mode 100644 index 0000000000000..6ecd5ef86d4c4 --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/module/injected_process_gen.go @@ -0,0 +1,115 @@ +package module + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+ +import ( + "github.com/tinylib/msgp/msgp" +) + +// MarshalMsg implements msgp.Marshaler +func (z *InjectedProcess) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 5 + // string "LocalHostname" + o = append(o, 0x85, 0xad, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.LocalHostname) + // string "InjectedEnv" + o = append(o, 0xab, 0x49, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x45, 0x6e, 0x76) + o = msgp.AppendArrayHeader(o, uint32(len(z.InjectedEnv))) + for za0001 := range z.InjectedEnv { + o = msgp.AppendBytes(o, z.InjectedEnv[za0001]) + } + // string "LanguageName" + o = append(o, 0xac, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.LanguageName) + // string "TracerVersion" + o = append(o, 0xad, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.TracerVersion) + // string "InjectorVersion" + o = append(o, 0xaf, 0x49, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.InjectorVersion) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *InjectedProcess) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "LocalHostname": + z.LocalHostname, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "LocalHostname") + return + } + case "InjectedEnv": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "InjectedEnv") + return + } + if cap(z.InjectedEnv) >= int(zb0002) { + z.InjectedEnv = (z.InjectedEnv)[:zb0002] + } else { + z.InjectedEnv = make([][]byte, zb0002) + } + for za0001 := range z.InjectedEnv { + z.InjectedEnv[za0001], bts, err = msgp.ReadBytesBytes(bts, z.InjectedEnv[za0001]) + if err != nil { + err = msgp.WrapError(err, "InjectedEnv", za0001) + return + } + } + case "LanguageName": + z.LanguageName, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "LanguageName") + return + } + case "TracerVersion": + z.TracerVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TracerVersion") + return + } + case "InjectorVersion": + z.InjectorVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "InjectorVersion") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *InjectedProcess) Msgsize() (s int) { + s = 1 + 14 + msgp.StringPrefixSize + len(z.LocalHostname) + 12 + msgp.ArrayHeaderSize + for za0001 := range z.InjectedEnv { + s += msgp.BytesPrefixSize + len(z.InjectedEnv[za0001]) + } + s += 13 + msgp.StringPrefixSize + len(z.LanguageName) + 14 + msgp.StringPrefixSize + len(z.TracerVersion) + 16 + msgp.StringPrefixSize + len(z.InjectorVersion) + return +} diff --git a/pkg/collector/corechecks/servicediscovery/module/injected_process_gen_test.go 
b/pkg/collector/corechecks/servicediscovery/module/injected_process_gen_test.go new file mode 100644 index 0000000000000..dbbe388c42a8b --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/module/injected_process_gen_test.go @@ -0,0 +1,67 @@ +package module + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "testing" + + "github.com/tinylib/msgp/msgp" +) + +func TestMarshalUnmarshalInjectedProcess(t *testing.T) { + v := InjectedProcess{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgInjectedProcess(b *testing.B) { + v := InjectedProcess{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgInjectedProcess(b *testing.B) { + v := InjectedProcess{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalInjectedProcess(b *testing.B) { + v := InjectedProcess{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/pkg/collector/corechecks/servicediscovery/module/stat.go b/pkg/collector/corechecks/servicediscovery/module/stat.go index 25077dc6d75a9..ca894aaf0e727 100644 --- a/pkg/collector/corechecks/servicediscovery/module/stat.go +++ b/pkg/collector/corechecks/servicediscovery/module/stat.go @@ -85,24 +85,25 @@ func getGlobalCPUTime() (uint64, error) { return totalTime, nil } -func updateCPUCoresStats(proc *process.Process, info *serviceInfo, lastGlobalCPUTime, currentGlobalCPUTime uint64) (float64, error) { - statPath := kernel.HostProc(strconv.Itoa(int(proc.Pid)), "stat") +// updateCPUCoresStats updates the provided serviceInfo cpuUsage and cpuTime stats. +func updateCPUCoresStats(pid int, info *serviceInfo, lastGlobalCPUTime, currentGlobalCPUTime uint64) error { + statPath := kernel.HostProc(strconv.Itoa(pid), "stat") // This file is very small so just read it fully. content, err := os.ReadFile(statPath) if err != nil { - return 0, err + return err } startIndex := bytes.LastIndexByte(content, byte(')')) if startIndex == -1 || startIndex+1 >= len(content) { - return 0, errors.New("invalid stat format") + return errors.New("invalid stat format") } // See proc(5) for a description of the format of statm and the fields. fields := strings.Fields(string(content[startIndex+1:])) if len(fields) < 50 { - return 0, errors.New("invalid stat format") + return errors.New("invalid stat format") } // Parse fields number 14 and 15, resp. User and System CPU time. @@ -110,19 +111,19 @@ func updateCPUCoresStats(proc *process.Process, info *serviceInfo, lastGlobalCPU // Here we address 11 & 12 since we skipped the first two fields. 
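// For illustration only (a shortened, made-up /proc/<pid>/stat line):
//
//	1234 (my-server) S 1 1234 1234 ... 350 120 ...
//
// Everything up to the last ')' is the pid and comm; after splitting what
// follows on whitespace, fields[0] is the process state (field 3 of the
// documented format), so utime (field 14) and stime (field 15) land at
// indices 11 and 12.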
usrTime, err := strconv.ParseUint(fields[11], 10, 64) if err != nil { - return 0, err + return err } sysTime, err := strconv.ParseUint(fields[12], 10, 64) if err != nil { - return 0, err + return err } processTimeDelta := float64(usrTime + sysTime - info.cpuTime) globalTimeDelta := float64(currentGlobalCPUTime - lastGlobalCPUTime) - cpuUsage := processTimeDelta / globalTimeDelta * float64(runtime.NumCPU()) + info.cpuUsage = processTimeDelta / globalTimeDelta * float64(runtime.NumCPU()) info.cpuTime = usrTime + sysTime - return cpuUsage, nil + return nil } diff --git a/pkg/collector/corechecks/servicediscovery/module/testdata/Datadog.Trace.dll b/pkg/collector/corechecks/servicediscovery/module/testdata/Datadog.Trace.dll new file mode 100644 index 0000000000000..421376db9e8ae --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/module/testdata/Datadog.Trace.dll @@ -0,0 +1 @@ +dummy diff --git a/pkg/collector/corechecks/servicediscovery/module/testutil/fake_server/.gitignore b/pkg/collector/corechecks/servicediscovery/module/testutil/fake_server/.gitignore index 16df22f27b688..eaaf842eef4d4 100644 --- a/pkg/collector/corechecks/servicediscovery/module/testutil/fake_server/.gitignore +++ b/pkg/collector/corechecks/servicediscovery/module/testutil/fake_server/.gitignore @@ -1,4 +1,6 @@ fake_server +dotnet +python java node sshd diff --git a/pkg/collector/corechecks/servicediscovery/service_detector.go b/pkg/collector/corechecks/servicediscovery/service_detector.go index ac0434be492e8..f403ec3bbde4a 100644 --- a/pkg/collector/corechecks/servicediscovery/service_detector.go +++ b/pkg/collector/corechecks/servicediscovery/service_detector.go @@ -9,7 +9,9 @@ import ( "slices" "strings" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/usm" + "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) // ServiceMetadata stores metadata about a service. @@ -40,11 +42,27 @@ func makeFinalName(meta usm.ServiceMetadata) string { return name } +// fixupMetadata performs additional adjustments on the meta data returned from +// the meta data extraction library. +func fixupMetadata(meta usm.ServiceMetadata, lang language.Language) usm.ServiceMetadata { + meta.Name = makeFinalName(meta) + + langName := "" + if lang != language.Unknown { + langName = string(lang) + } + meta.Name, _ = traceutil.NormalizeService(meta.Name, langName) + if meta.DDService != "" { + meta.DDService, _ = traceutil.NormalizeService(meta.DDService, langName) + } + + return meta +} + // GetServiceName gets the service name based on the command line arguments and // the list of environment variables. 
-func GetServiceName(cmdline []string, env map[string]string, root string, contextMap usm.DetectorContextMap) usm.ServiceMetadata { +func GetServiceName(cmdline []string, env map[string]string, root string, lang language.Language, contextMap usm.DetectorContextMap) usm.ServiceMetadata { fs := usm.NewSubDirFS(root) - meta, _ := usm.ExtractServiceMetadata(cmdline, env, fs, contextMap) - meta.Name = makeFinalName(meta) - return meta + meta, _ := usm.ExtractServiceMetadata(cmdline, env, fs, lang, contextMap) + return fixupMetadata(meta, lang) } diff --git a/pkg/collector/corechecks/servicediscovery/service_detector_test.go b/pkg/collector/corechecks/servicediscovery/service_detector_test.go new file mode 100644 index 0000000000000..5f250161237c4 --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/service_detector_test.go @@ -0,0 +1,32 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +package servicediscovery + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/usm" +) + +func TestFixup(t *testing.T) { + meta := fixupMetadata(usm.ServiceMetadata{Name: "fOo", DDService: "BAR"}, language.Go) + assert.Equal(t, meta.Name, "foo") + assert.Equal(t, meta.DDService, "bar") + + meta = fixupMetadata(usm.ServiceMetadata{Name: ""}, language.Go) + assert.Equal(t, meta.Name, "unnamed-go-service") + assert.Equal(t, meta.DDService, "") + + meta = fixupMetadata(usm.ServiceMetadata{Name: ""}, language.Unknown) + assert.Equal(t, meta.Name, "unnamed-service") + assert.Equal(t, meta.DDService, "") + + meta = fixupMetadata(usm.ServiceMetadata{Name: "foo", AdditionalNames: []string{"bar", "baz"}}, language.Go) + assert.Equal(t, meta.Name, "foo-bar-baz") +} diff --git a/pkg/collector/corechecks/servicediscovery/servicediscovery.go b/pkg/collector/corechecks/servicediscovery/servicediscovery.go index 61e97ca5de80c..1cdd101ebeb1e 100644 --- a/pkg/collector/corechecks/servicediscovery/servicediscovery.go +++ b/pkg/collector/corechecks/servicediscovery/servicediscovery.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -101,9 +101,6 @@ func newCheck() check.Check { // Configure parses the check configuration and initializes the check func (c *Check) Configure(senderManager sender.SenderManager, _ uint64, instanceConfig, initConfig integration.Data, source string) error { - if !pkgconfig.SystemProbe().GetBool("discovery.enabled") { - return errors.New("service discovery is disabled") - } if newOSImpl == nil { return errors.New("service_discovery check not implemented on " + runtime.GOOS) } @@ -135,6 +132,10 @@ func (c *Check) Configure(senderManager sender.SenderManager, _ uint64, instance // Run executes the check. 
func (c *Check) Run() error { + if !pkgconfigsetup.SystemProbe().GetBool("discovery.enabled") { + return nil + } + start := time.Now() defer func() { diff := time.Since(start).Seconds() diff --git a/pkg/collector/corechecks/servicediscovery/usm/service.go b/pkg/collector/corechecks/servicediscovery/usm/service.go index 9de72147d64df..36ddc7e804d35 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/service.go +++ b/pkg/collector/corechecks/servicediscovery/usm/service.go @@ -17,6 +17,8 @@ import ( "slices" "strings" "unicode" + + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language" ) type detectorCreatorFn func(ctx DetectionContext) detector @@ -165,20 +167,21 @@ func SizeVerifiedReader(file fs.File) (io.Reader, error) { return io.LimitReader(file, min(size, maxParseFileSize)), nil } -// List of binaries that usually have additional process context of what's running -var binsWithContext = map[string]detectorCreatorFn{ - "python": newPythonDetector, - "python2.7": newPythonDetector, - "python3": newPythonDetector, - "python3.7": newPythonDetector, - "ruby2.3": newSimpleDetector, - "ruby": newSimpleDetector, - "java": newJavaDetector, - "sudo": newSimpleDetector, - "node": newNodeDetector, - "dotnet": newDotnetDetector, - "php": newPhpDetector, - "gunicorn": newGunicornDetector, +// Map languages to their context detectors +var languageDetectors = map[language.Language]detectorCreatorFn{ + language.Python: newPythonDetector, + language.Ruby: newSimpleDetector, + language.Java: newJavaDetector, + language.Node: newNodeDetector, + language.DotNet: newDotnetDetector, + language.PHP: newPhpDetector, +} + +// Map executables that usually have additional process context of what's +// running, to context detectors +var executableDetectors = map[string]detectorCreatorFn{ + "sudo": newSimpleDetector, + "gunicorn": newGunicornDetector, } func serviceNameInjected(envs map[string]string) bool { @@ -194,7 +197,7 @@ func serviceNameInjected(envs map[string]string) bool { } // ExtractServiceMetadata attempts to detect ServiceMetadata from the given process. -func ExtractServiceMetadata(args []string, envs map[string]string, fs fs.SubFS, contextMap DetectorContextMap) (metadata ServiceMetadata, success bool) { +func ExtractServiceMetadata(args []string, envs map[string]string, fs fs.SubFS, lang language.Language, contextMap DetectorContextMap) (metadata ServiceMetadata, success bool) { dc := DetectionContext{ args: args, envs: envs, @@ -234,7 +237,12 @@ func ExtractServiceMetadata(args []string, envs map[string]string, fs fs.SubFS, exe = normalizeExeName(exe) - if detectorProvider, ok := binsWithContext[exe]; ok { + detectorProvider, ok := executableDetectors[exe] + if !ok { + detectorProvider, ok = languageDetectors[lang] + } + + if ok { langMeta, ok := detectorProvider(dc).detect(cmd[1:]) // The detector could return a DD Service name (eg. 
Java, from the diff --git a/pkg/collector/corechecks/servicediscovery/usm/service_test.go b/pkg/collector/corechecks/servicediscovery/usm/service_test.go index 38d8c390bb169..d3e4f2df2b6a3 100644 --- a/pkg/collector/corechecks/servicediscovery/usm/service_test.go +++ b/pkg/collector/corechecks/servicediscovery/usm/service_test.go @@ -18,6 +18,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language" "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" ) @@ -50,6 +51,7 @@ func TestExtractServiceMetadata(t *testing.T) { name string cmdline []string envs map[string]string + lang language.Language expectedGeneratedName string expectedDDService string expectedAdditionalServices []string @@ -111,6 +113,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/opt/python/2.7.11/bin/python2.7", "flask", "run", "--host=0.0.0.0", }, + lang: language.Python, expectedGeneratedName: "flask", envs: map[string]string{"PWD": "testdata/python"}, fs: &subUsmTestData, @@ -120,6 +123,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/opt/python/2.7.11/bin/python2.7", "testdata/python/flask", "run", "--host=0.0.0.0", "--without-threads", }, + lang: language.Python, expectedGeneratedName: "flask", fs: &subUsmTestData, }, @@ -128,6 +132,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/opt/python/2.7.11/bin/python2.7 flask run --host=0.0.0.0", }, + lang: language.Python, envs: map[string]string{"PWD": "testdata/python"}, expectedGeneratedName: "flask", fs: &subUsmTestData, @@ -137,6 +142,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "python3", "-m", "hello", }, + lang: language.Python, expectedGeneratedName: "hello", }, { @@ -144,6 +150,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "ruby", "/usr/sbin/td-agent", "--log", "/var/log/td-agent/td-agent.log", "--daemon", "/var/run/td-agent/td-agent.pid", }, + lang: language.Ruby, expectedGeneratedName: "td-agent", }, { @@ -151,6 +158,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "java", "-Xmx4000m", "-Xms4000m", "-XX:ReservedCodeCacheSize=256m", "-jar", "/opt/sheepdog/bin/myservice.jar", }, + lang: language.Java, expectedGeneratedName: "myservice", }, { @@ -158,6 +166,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "java", "-Xmx4000m", "-Xms4000m", "-XX:ReservedCodeCacheSize=256m", "com.datadog.example.HelloWorld", }, + lang: language.Java, expectedGeneratedName: "HelloWorld", }, { @@ -165,6 +174,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "java", "-Xmx4000m", "-Xms4000m", "-XX:ReservedCodeCacheSize=256m", "kafka.Kafka", }, + lang: language.Java, expectedGeneratedName: "Kafka", }, { @@ -175,6 +185,7 @@ func TestExtractServiceMetadata(t *testing.T) { "-cp", "/etc/cassandra:/usr/share/cassandra/lib/HdrHistogram-2.1.9.jar:/usr/share/cassandra/lib/cassandra-driver-core-3.0.1-shaded.jar", "org.apache.cassandra.service.CassandraDaemon", }, + lang: language.Java, expectedGeneratedName: "cassandra", }, { @@ -182,8 +193,10 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/home/dd/my java dir/java", "com.dog.cat", }, + lang: language.Java, expectedGeneratedName: "cat", - }, { + }, + { name: "node js with package.json not present", cmdline: []string{ "/usr/bin/node", @@ -193,6 +206,7 @@ func TestExtractServiceMetadata(t *testing.T) { "--", "/somewhere/index.js", }, + 
lang: language.Node, expectedGeneratedName: "node", }, { @@ -201,6 +215,7 @@ func TestExtractServiceMetadata(t *testing.T) { "/usr/bin/node", "./testdata/inner/index.js", }, + lang: language.Node, expectedGeneratedName: "node", }, { @@ -213,6 +228,7 @@ func TestExtractServiceMetadata(t *testing.T) { "--", "./testdata/index.js", }, + lang: language.Node, expectedGeneratedName: "my-awesome-package", fs: &subUsmTestData, }, @@ -226,6 +242,7 @@ func TestExtractServiceMetadata(t *testing.T) { "./testdata/bins/broken", "./testdata/bins/json-server", }, + lang: language.Node, expectedGeneratedName: "json-server-package", skipOnWindows: true, fs: &subUsmTestData, @@ -240,6 +257,7 @@ func TestExtractServiceMetadata(t *testing.T) { "--", "index.js", }, + lang: language.Node, envs: map[string]string{"PWD": "testdata/deep"}, // it's relative but it's ok for testing purposes fs: &subUsmTestData, expectedGeneratedName: "my-awesome-package", @@ -251,11 +269,13 @@ func TestExtractServiceMetadata(t *testing.T) { "-jar", springBootAppFullPath, }, + lang: language.Java, expectedGeneratedName: "default-app", }, { name: "wildfly 18 standalone", - cmdline: []string{"home/app/.sdkman/candidates/java/17.0.4.1-tem/bin/java", + cmdline: []string{ + "home/app/.sdkman/candidates/java/17.0.4.1-tem/bin/java", "-D[Standalone]", "-server", "-Xms64m", @@ -276,7 +296,9 @@ func TestExtractServiceMetadata(t *testing.T) { "" + jbossTestAppRoot + "/modules", "org.jboss.as.standalone", "-Djboss.home.dir=" + jbossTestAppRoot, - "-Djboss.server.base.dir=" + jbossTestAppRoot + "/standalone"}, + "-Djboss.server.base.dir=" + jbossTestAppRoot + "/standalone", + }, + lang: language.Java, expectedGeneratedName: "jboss-modules", expectedAdditionalServices: []string{"my-jboss-webapp", "some_context_root", "web3"}, fs: &sub, @@ -284,7 +306,8 @@ func TestExtractServiceMetadata(t *testing.T) { }, { name: "wildfly 18 domain", - cmdline: []string{"/home/app/.sdkman/candidates/java/17.0.4.1-tem/bin/java", + cmdline: []string{ + "/home/app/.sdkman/candidates/java/17.0.4.1-tem/bin/java", "--add-exports=java.base/sun.nio.ch=ALL-UNNAMED", "--add-exports=jdk.unsupported/sun.reflect=ALL-UNNAMED", "--add-exports=jdk.unsupported/sun.misc=ALL-UNNAMED", @@ -308,7 +331,9 @@ func TestExtractServiceMetadata(t *testing.T) { "" + jbossTestAppRoot + "/jboss-modules.jar", "-mp", "" + jbossTestAppRoot + "/modules", - "org.jboss.as.server"}, + "org.jboss.as.server", + }, + lang: language.Java, expectedGeneratedName: "jboss-modules", expectedAdditionalServices: []string{"web3", "web4"}, fs: &sub, @@ -317,7 +342,8 @@ func TestExtractServiceMetadata(t *testing.T) { { name: "weblogic 12", fs: &sub, - cmdline: []string{"/u01/jdk/bin/java", + cmdline: []string{ + "/u01/jdk/bin/java", "-Djava.security.egd=file:/dev/./urandom", "-cp", "/u01/oracle/wlserver/server/lib/weblogic-launcher.jar", @@ -329,7 +355,9 @@ func TestExtractServiceMetadata(t *testing.T) { "-da", "-Dwls.home=/u01/oracle/wlserver/server", "-Dweblogic.home=/u01/oracle/wlserver/server", - "weblogic.Server"}, + "weblogic.Server", + }, + lang: language.Java, envs: map[string]string{"PWD": weblogicTestAppRootAbsolute}, expectedGeneratedName: "Server", expectedAdditionalServices: []string{"my_context", "sample4", "some_context_root"}, @@ -339,6 +367,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/usr/bin/java", "-Ddd.service=custom", "-jar", "app.jar", }, + lang: language.Java, expectedDDService: "custom", expectedGeneratedName: "app", }, @@ -349,6 +378,7 @@ func 
TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/usr/bin/java", "-Ddd.service=dd-service-from-property", "-jar", "app.jar", }, + lang: language.Java, envs: map[string]string{"DD_SERVICE": "dd-service-from-env"}, expectedDDService: "dd-service-from-property", expectedGeneratedName: "app", @@ -375,6 +405,7 @@ func TestExtractServiceMetadata(t *testing.T) { "org.apache.catalina.startup.Bootstrap", "start", }, + lang: language.Java, expectedGeneratedName: "catalina", expectedAdditionalServices: []string{"app2", "custom"}, fs: &subUsmTestData, @@ -384,6 +415,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/usr/bin/dotnet", "./myservice.dll", }, + lang: language.DotNet, expectedGeneratedName: "myservice", }, { @@ -391,6 +423,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/usr/bin/dotnet", "-v", "--", "/app/lib/myservice.dll", }, + lang: language.DotNet, expectedGeneratedName: "myservice", }, { @@ -398,6 +431,7 @@ func TestExtractServiceMetadata(t *testing.T) { cmdline: []string{ "/usr/bin/dotnet", "run", "--project", "./projects/proj1/proj1.csproj", }, + lang: language.DotNet, expectedGeneratedName: "dotnet", }, { @@ -407,6 +441,7 @@ func TestExtractServiceMetadata(t *testing.T) { "artisan", "serve", }, + lang: language.PHP, expectedGeneratedName: "laravel", }, { @@ -416,6 +451,7 @@ func TestExtractServiceMetadata(t *testing.T) { "-ddatadog.service=foo", "swoole-server.php", }, + lang: language.PHP, expectedGeneratedName: "foo", }, { @@ -425,6 +461,7 @@ func TestExtractServiceMetadata(t *testing.T) { "artisan", "migrate:fresh", }, + lang: language.PHP, expectedGeneratedName: "laravel", }, { @@ -434,6 +471,7 @@ func TestExtractServiceMetadata(t *testing.T) { "artisan", "migrate:fresh", }, + lang: language.PHP, expectedGeneratedName: "laravel", }, { @@ -455,6 +493,7 @@ func TestExtractServiceMetadata(t *testing.T) { { name: "DD_SERVICE_set_manually", cmdline: []string{"java", "-jar", "Foo.jar"}, + lang: language.Java, envs: map[string]string{"DD_SERVICE": "howdy"}, expectedDDService: "howdy", expectedGeneratedName: "Foo", @@ -462,6 +501,7 @@ func TestExtractServiceMetadata(t *testing.T) { { name: "DD_SERVICE_set_manually_tags", cmdline: []string{"java", "-jar", "Foo.jar"}, + lang: language.Java, envs: map[string]string{"DD_TAGS": "service:howdy"}, expectedDDService: "howdy", expectedGeneratedName: "Foo", @@ -469,6 +509,7 @@ func TestExtractServiceMetadata(t *testing.T) { { name: "DD_SERVICE_set_manually_injection", cmdline: []string{"java", "-jar", "Foo.jar"}, + lang: language.Java, envs: map[string]string{"DD_SERVICE": "howdy", "DD_INJECTION_ENABLED": "tracer,service_name"}, expectedDDService: "howdy", expectedGeneratedName: "Foo", @@ -481,6 +522,7 @@ func TestExtractServiceMetadata(t *testing.T) { "--workers=2", "test:app", }, + lang: language.Python, expectedGeneratedName: "test", }, { @@ -581,7 +623,7 @@ func TestExtractServiceMetadata(t *testing.T) { if tt.fs != nil { fs = *tt.fs } - meta, ok := ExtractServiceMetadata(tt.cmdline, tt.envs, fs, make(DetectorContextMap)) + meta, ok := ExtractServiceMetadata(tt.cmdline, tt.envs, fs, tt.lang, make(DetectorContextMap)) if len(tt.expectedGeneratedName) == 0 && len(tt.expectedDDService) == 0 { require.False(t, ok) } else { diff --git a/pkg/collector/corechecks/snmp/integration_profile_bundle_test.go b/pkg/collector/corechecks/snmp/integration_profile_bundle_test.go index d1481d8d27538..ad5dd0e5d0909 100644 --- a/pkg/collector/corechecks/snmp/integration_profile_bundle_test.go +++ 
b/pkg/collector/corechecks/snmp/integration_profile_bundle_test.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/checkconfig" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/common" @@ -26,7 +26,7 @@ func TestProfileBundleJsonZip(t *testing.T) { timeNow = common.MockTimeNow aggregator.NewBufferedAggregator(nil, nil, "", 1*time.Hour) invalidPath, _ := filepath.Abs(filepath.Join("internal", "test", "zipprofiles.d")) - config.Datadog().SetWithoutSource("confd_path", invalidPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidPath) sess := session.CreateMockSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { diff --git a/pkg/collector/corechecks/snmp/integration_profile_metadata_test.go b/pkg/collector/corechecks/snmp/integration_profile_metadata_test.go index fdd891c5bc67d..c73e5a06b3561 100644 --- a/pkg/collector/corechecks/snmp/integration_profile_metadata_test.go +++ b/pkg/collector/corechecks/snmp/integration_profile_metadata_test.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/version" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/checkconfig" @@ -33,7 +33,7 @@ func TestProfileMetadata_f5(t *testing.T) { timeNow = common.MockTimeNow aggregator.NewBufferedAggregator(nil, nil, "", 1*time.Hour) invalidPath, _ := filepath.Abs(filepath.Join("internal", "test", "metadata.d")) - config.Datadog().SetWithoutSource("confd_path", invalidPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidPath) sess := session.CreateMockSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { diff --git a/pkg/collector/corechecks/snmp/integration_topology_test.go b/pkg/collector/corechecks/snmp/integration_topology_test.go index 8aea54e5244c6..1945a8362cad1 100644 --- a/pkg/collector/corechecks/snmp/integration_topology_test.go +++ b/pkg/collector/corechecks/snmp/integration_topology_test.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/version" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/checkconfig" @@ -33,7 +33,7 @@ func TestTopologyPayload_LLDP(t *testing.T) { timeNow = common.MockTimeNow aggregator.NewBufferedAggregator(nil, nil, "", 1*time.Hour) invalidPath, _ := filepath.Abs(filepath.Join("internal", "test", "metadata.d")) - config.Datadog().SetWithoutSource("confd_path", invalidPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidPath) sess := session.CreateMockSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, 
error) { @@ -735,7 +735,7 @@ func TestTopologyPayload_CDP(t *testing.T) { timeNow = common.MockTimeNow aggregator.NewBufferedAggregator(nil, nil, "", 1*time.Hour) invalidPath, _ := filepath.Abs(filepath.Join("internal", "test", "metadata.d")) - config.Datadog().SetWithoutSource("confd_path", invalidPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidPath) sess := session.CreateMockSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { @@ -1428,7 +1428,7 @@ func TestTopologyPayload_LLDP_CDP(t *testing.T) { timeNow = common.MockTimeNow aggregator.NewBufferedAggregator(nil, nil, "", 1*time.Hour) invalidPath, _ := filepath.Abs(filepath.Join("internal", "test", "metadata.d")) - config.Datadog().SetWithoutSource("confd_path", invalidPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidPath) sess := session.CreateMockSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { diff --git a/pkg/collector/corechecks/snmp/internal/checkconfig/config.go b/pkg/collector/corechecks/snmp/internal/checkconfig/config.go index 2fd5d6bba5591..1dc318f384825 100644 --- a/pkg/collector/corechecks/snmp/internal/checkconfig/config.go +++ b/pkg/collector/corechecks/snmp/internal/checkconfig/config.go @@ -22,7 +22,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/collector/check/defaults" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" coreutil "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/hostname" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -512,7 +512,7 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data } else if initConfig.Namespace != "" { c.Namespace = initConfig.Namespace } else { - c.Namespace = coreconfig.Datadog().GetString("network_devices.namespace") + c.Namespace = pkgconfigsetup.Datadog().GetString("network_devices.namespace") } c.Namespace, err = utils.NormalizeNamespace(c.Namespace) diff --git a/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go b/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go index 67c3d49600777..49bfd183f77a0 100644 --- a/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go +++ b/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go @@ -15,7 +15,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/DataDog/datadog-agent/pkg/aggregator" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkdevice/pinger" "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" @@ -1430,7 +1430,7 @@ collect_topology: true } func Test_buildConfig_namespace(t *testing.T) { - defer coreconfig.Datadog().SetWithoutSource("network_devices.namespace", "default") + defer pkgconfigsetup.Datadog().SetWithoutSource("network_devices.namespace", "default") // Should use namespace defined in instance config // language=yaml @@ -1475,7 +1475,7 @@ ip_address: 1.2.3.4 community_string: "abc" `) rawInitConfig = []byte(``) - coreconfig.Datadog().SetWithoutSource("network_devices.namespace", "totoro") + pkgconfigsetup.Datadog().SetWithoutSource("network_devices.namespace", "totoro") conf, err = NewCheckConfig(rawInstanceConfig, rawInitConfig) assert.Nil(t, err) assert.Equal(t, "totoro", 
conf.Namespace) @@ -1503,7 +1503,7 @@ community_string: "abc" `) rawInitConfig = []byte(` namespace: `) - coreconfig.Datadog().SetWithoutSource("network_devices.namespace", "mononoke") + pkgconfigsetup.Datadog().SetWithoutSource("network_devices.namespace", "mononoke") conf, err = NewCheckConfig(rawInstanceConfig, rawInitConfig) assert.Nil(t, err) assert.Equal(t, "mononoke", conf.Namespace) @@ -1515,7 +1515,7 @@ ip_address: 1.2.3.4 community_string: "abc" `) rawInitConfig = []byte(``) - coreconfig.Datadog().SetWithoutSource("network_devices.namespace", "") + pkgconfigsetup.Datadog().SetWithoutSource("network_devices.namespace", "") _, err = NewCheckConfig(rawInstanceConfig, rawInitConfig) assert.EqualError(t, err, "namespace cannot be empty") } @@ -2442,7 +2442,7 @@ func TestCheckConfig_getResolvedSubnetName(t *testing.T) { } func TestCheckConfig_GetStaticTags(t *testing.T) { - coreconfig.Datadog().SetWithoutSource("hostname", "my-hostname") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "my-hostname") tests := []struct { name string config CheckConfig diff --git a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go index 1ee597fec2e25..8737e5845378f 100644 --- a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go +++ b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go @@ -19,7 +19,7 @@ import ( "go.uber.org/atomic" "github.com/DataDog/datadog-agent/pkg/collector/externalhost" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/util/hostname/validate" @@ -65,6 +65,7 @@ type DeviceCheck struct { config *checkconfig.CheckConfig sender *report.MetricSender session session.Session + sessionFactory session.Factory devicePinger pinger.Pinger sessionCloseErrorCount *atomic.Uint64 savedDynamicTags []string @@ -80,12 +81,8 @@ const cacheKeyPrefix = "snmp-tags" func NewDeviceCheck(config *checkconfig.CheckConfig, ipAddress string, sessionFactory session.Factory) (*DeviceCheck, error) { newConfig := config.CopyWithNewIP(ipAddress) - sess, err := sessionFactory(newConfig) - if err != nil { - return nil, fmt.Errorf("failed to configure session: %s", err) - } - var devicePinger pinger.Pinger + var err error if newConfig.PingEnabled { devicePinger, err = createPinger(newConfig.PingConfig) if err != nil { @@ -98,7 +95,7 @@ func NewDeviceCheck(config *checkconfig.CheckConfig, ipAddress string, sessionFa d := DeviceCheck{ config: newConfig, - session: sess, + sessionFactory: sessionFactory, devicePinger: devicePinger, sessionCloseErrorCount: atomic.NewUint64(0), nextAutodetectMetrics: timeNow(), @@ -160,6 +157,12 @@ func (d *DeviceCheck) Run(collectionTime time.Time) error { startTime := time.Now() staticTags := append(d.config.GetStaticTags(), d.config.GetNetworkTags()...) 
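NewDeviceCheck now stores the sessionFactory instead of opening a session up front; the lines added just below create the session at the start of each Run, so session configuration errors surface per run rather than at construction. A hedged sketch of the resulting pattern, reusing the mock factory shape already defined in these test files (cfg stands in for the *checkconfig.CheckConfig the fixture builds):

	sess := session.CreateMockSession()
	factory := func(*checkconfig.CheckConfig) (session.Session, error) { return sess, nil }
	deviceCk, err := NewDeviceCheck(cfg, "1.2.3.4", factory) // no session is opened here anymore
	assert.Nil(t, err)
	// Run() now calls d.sessionFactory(d.config) itself; tests that need the
	// session before Run assign it explicitly, as the updated tests below do:
	deviceCk.session, err = factory(cfg)
	assert.Nil(t, err)
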
+ var err error + d.session, err = d.sessionFactory(d.config) + if err != nil { + return err + } + // Fetch and report metrics var checkErr error var deviceStatus metadata.DeviceStatus @@ -249,7 +252,7 @@ func (d *DeviceCheck) setDeviceHostExternalTags() { if deviceHostname == "" || err != nil { return } - agentTags := configUtils.GetConfiguredTags(config.Datadog(), false) + agentTags := configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), false) log.Debugf("Set external tags for device host, host=`%s`, agentTags=`%v`", deviceHostname, agentTags) externalhost.SetExternalTags(deviceHostname, common.SnmpExternalTagsSourceType, agentTags) } diff --git a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go index f06bd065e5059..028e233687aca 100644 --- a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go +++ b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go @@ -19,7 +19,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/version" @@ -242,7 +242,7 @@ func TestDetectMetricsToCollect(t *testing.T) { defer func() { timeNow = time.Now }() profilesWithInvalidExtendConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "detectmetr.d")) - config.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) sess := session.CreateFakeSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { @@ -421,6 +421,9 @@ profiles: deviceCk, err := NewDeviceCheck(config, "1.2.3.4", sessionFactory) assert.Nil(t, err) + deviceCk.session, err = sessionFactory(config) + assert.Nil(t, err) + sender := mocksender.NewMockSender("123") // required to initiate aggregator deviceCk.SetSender(report.NewMetricSender(sender, "", nil, report.MakeInterfaceBandwidthState())) sess.On("GetNext", []string{"1.0"}).Return(session.CreateGetNextPacket("9999", gosnmp.EndOfMibView, nil), nil) @@ -912,6 +915,9 @@ community_string: public deviceCk, err := NewDeviceCheck(config, "1.2.3.4", sessionFactory) assert.Nil(t, err) + deviceCk.session, err = sessionFactory(config) + assert.Nil(t, err) + sender := mocksender.NewMockSender("123") // required to initiate aggregator sender.SetupAcceptAll() diff --git a/pkg/collector/corechecks/snmp/internal/discovery/discovery_test.go b/pkg/collector/corechecks/snmp/internal/discovery/discovery_test.go index b9f9a0735afdb..9d59def5890d0 100644 --- a/pkg/collector/corechecks/snmp/internal/discovery/discovery_test.go +++ b/pkg/collector/corechecks/snmp/internal/discovery/discovery_test.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/checkconfig" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/session" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func waitForDiscoveredDevices(discovery *Discovery, expectedDeviceCount int, timeout time.Duration) error { @@ -34,7 +34,7 @@ func waitForDiscoveredDevices(discovery *Discovery, expectedDeviceCount int, tim func TestDiscovery(t *testing.T) { path, _ := 
filepath.Abs(filepath.Join(".", "test", "run_path", "TestDiscovery")) - config.Datadog().SetWithoutSource("run_path", path) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", path) sess := session.CreateMockSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { @@ -85,7 +85,7 @@ func TestDiscovery(t *testing.T) { func TestDiscoveryCache(t *testing.T) { path, _ := filepath.Abs(filepath.Join(".", "test", "run_path", "TestDiscoveryCache")) - config.Datadog().SetWithoutSource("run_path", path) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", path) sess := session.CreateMockSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { diff --git a/pkg/collector/corechecks/snmp/internal/discovery/testing.go b/pkg/collector/corechecks/snmp/internal/discovery/testing.go index 10f0fc4286488..820bc1747acd7 100644 --- a/pkg/collector/corechecks/snmp/internal/discovery/testing.go +++ b/pkg/collector/corechecks/snmp/internal/discovery/testing.go @@ -10,11 +10,11 @@ package discovery import ( "path/filepath" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // SetTestRunPath sets run_path for testing func SetTestRunPath() { path, _ := filepath.Abs(filepath.Join(".", "test", "run_path")) - config.Datadog().SetWithoutSource("run_path", path) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", path) } diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile_json_bundle_test.go b/pkg/collector/corechecks/snmp/internal/profile/profile_json_bundle_test.go index 0aea0a7b22401..7ca8bcbbe3172 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile_json_bundle_test.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile_json_bundle_test.go @@ -12,13 +12,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func Test_loadBundleJSONProfiles(t *testing.T) { defaultTestConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "zipprofiles.d")) SetGlobalProfileConfigMap(nil) - config.Datadog().SetWithoutSource("confd_path", defaultTestConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", defaultTestConfdPath) pth := findProfileBundleFilePath() require.FileExists(t, pth) resolvedProfiles, err := loadBundleJSONProfiles(pth) diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile_resolver_test.go b/pkg/collector/corechecks/snmp/internal/profile/profile_resolver_test.go index b3868a66cb60f..d0e03a880b928 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile_resolver_test.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile_resolver_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" @@ -26,18 +26,18 @@ import ( func Test_resolveProfiles(t *testing.T) { defaultTestConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "conf.d")) - config.Datadog().SetWithoutSource("confd_path", defaultTestConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", defaultTestConfdPath) defaultTestConfdProfiles := ProfileConfigMap{} userTestConfdProfiles, err := 
getProfileDefinitions(userProfilesFolder, true) require.NoError(t, err) profilesWithInvalidExtendConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "invalid_ext.d")) - config.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) profilesWithInvalidExtendProfiles, err := getProfileDefinitions(userProfilesFolder, true) require.NoError(t, err) invalidCyclicConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "invalid_cyclic.d")) - config.Datadog().SetWithoutSource("confd_path", invalidCyclicConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidCyclicConfdPath) invalidCyclicProfiles, err := getProfileDefinitions(userProfilesFolder, true) require.NoError(t, err) @@ -50,7 +50,7 @@ func Test_resolveProfiles(t *testing.T) { require.NoError(t, err) userProfilesCaseConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "user_profiles.d")) - config.Datadog().SetWithoutSource("confd_path", userProfilesCaseConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", userProfilesCaseConfdPath) userProfilesCaseUserProfiles, err := getProfileDefinitions(userProfilesFolder, true) require.NoError(t, err) userProfilesCaseDefaultProfiles, err := getProfileDefinitions(defaultProfilesFolder, true) diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile_test.go b/pkg/collector/corechecks/snmp/internal/profile/profile_test.go index b924661ae37df..9b6b4fdfed4de 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile_test.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/assert" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" ) @@ -109,7 +109,7 @@ func Test_getProfiles(t *testing.T) { t.Run(tt.name, func(t *testing.T) { SetGlobalProfileConfigMap(nil) path, _ := filepath.Abs(filepath.Join("..", "test", tt.mockConfd)) - coreconfig.Datadog().SetWithoutSource("confd_path", path) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", path) actualProfiles, err := GetProfiles(tt.profiles) if tt.expectedErr != "" { diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile_yaml.go b/pkg/collector/corechecks/snmp/internal/profile/profile_yaml.go index fc1cc11d0948b..03d7be924a9f3 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile_yaml.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile_yaml.go @@ -14,7 +14,7 @@ import ( "gopkg.in/yaml.v2" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/filesystem" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -109,7 +109,7 @@ func resolveProfileDefinitionPath(definitionFile string) string { } func getProfileConfdRoot(profileFolderName string) string { - confdPath := config.Datadog().GetString("confd_path") + confdPath := pkgconfigsetup.Datadog().GetString("confd_path") return filepath.Join(confdPath, "snmp.d", profileFolderName) } diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile_yaml_test.go b/pkg/collector/corechecks/snmp/internal/profile/profile_yaml_test.go index 55c629b9b8d8d..81d7513ab9fa7 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile_yaml_test.go +++ 
b/pkg/collector/corechecks/snmp/internal/profile/profile_yaml_test.go @@ -16,7 +16,7 @@ import ( "github.com/cihub/seelog" assert "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" @@ -33,7 +33,7 @@ func getMetricFromProfile(p profiledefinition.ProfileDefinition, metricName stri func Test_resolveProfileDefinitionPath(t *testing.T) { defaultTestConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "user_profiles.d")) - config.Datadog().SetWithoutSource("confd_path", defaultTestConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", defaultTestConfdPath) absPath, _ := filepath.Abs(filepath.Join("tmp", "myfile.yaml")) tests := []struct { @@ -49,17 +49,17 @@ func Test_resolveProfileDefinitionPath(t *testing.T) { { name: "relative path with default profile", definitionFilePath: "p2.yaml", - expectedPath: filepath.Join(config.Datadog().Get("confd_path").(string), "snmp.d", "default_profiles", "p2.yaml"), + expectedPath: filepath.Join(pkgconfigsetup.Datadog().Get("confd_path").(string), "snmp.d", "default_profiles", "p2.yaml"), }, { name: "relative path with user profile", definitionFilePath: "p3.yaml", - expectedPath: filepath.Join(config.Datadog().Get("confd_path").(string), "snmp.d", "profiles", "p3.yaml"), + expectedPath: filepath.Join(pkgconfigsetup.Datadog().Get("confd_path").(string), "snmp.d", "profiles", "p3.yaml"), }, { name: "relative path with user profile precedence", definitionFilePath: "p1.yaml", - expectedPath: filepath.Join(config.Datadog().Get("confd_path").(string), "snmp.d", "profiles", "p1.yaml"), + expectedPath: filepath.Join(pkgconfigsetup.Datadog().Get("confd_path").(string), "snmp.d", "profiles", "p1.yaml"), }, } for _, tt := range tests { @@ -84,7 +84,7 @@ func Test_loadYamlProfiles(t *testing.T) { func Test_loadYamlProfiles_withUserProfiles(t *testing.T) { defaultTestConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "user_profiles.d")) SetGlobalProfileConfigMap(nil) - config.Datadog().SetWithoutSource("confd_path", defaultTestConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", defaultTestConfdPath) defaultProfiles, err := loadYamlProfiles() assert.Nil(t, err) @@ -113,7 +113,7 @@ func Test_loadYamlProfiles_withUserProfiles(t *testing.T) { func Test_loadYamlProfiles_invalidDir(t *testing.T) { invalidPath, _ := filepath.Abs(filepath.Join(".", "tmp", "invalidPath")) - config.Datadog().SetWithoutSource("confd_path", invalidPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidPath) SetGlobalProfileConfigMap(nil) defaultProfiles, err := loadYamlProfiles() @@ -129,7 +129,7 @@ func Test_loadYamlProfiles_invalidExtendProfile(t *testing.T) { log.SetupLogger(l, "debug") profilesWithInvalidExtendConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "invalid_ext.d")) - config.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) SetGlobalProfileConfigMap(nil) defaultProfiles, err := loadYamlProfiles() @@ -150,7 +150,7 @@ func Test_loadYamlProfiles_userAndDefaultProfileFolderDoesNotExist(t *testing.T) log.SetupLogger(l, "debug") profilesWithInvalidExtendConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "does-not-exist.d")) - config.Datadog().SetWithoutSource("confd_path", 
profilesWithInvalidExtendConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) SetGlobalProfileConfigMap(nil) defaultProfiles, err := loadYamlProfiles() @@ -173,7 +173,7 @@ func Test_loadYamlProfiles_validAndInvalidProfiles(t *testing.T) { log.SetupLogger(l, "debug") profilesWithInvalidExtendConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "valid_invalid.d")) - config.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) SetGlobalProfileConfigMap(nil) defaultProfiles, err := loadYamlProfiles() diff --git a/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go b/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go index 144a7a3abcbd1..6831ad07c2827 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go +++ b/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go @@ -13,7 +13,7 @@ import ( "github.com/mohae/deepcopy" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" ) @@ -33,7 +33,7 @@ func SetConfdPathAndCleanProfiles() { if !pathExists(file) { file, _ = filepath.Abs(filepath.Join(".", "internal", "test", "conf.d")) } - config.Datadog().SetWithoutSource("confd_path", file) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", file) } // FixtureProfileDefinitionMap returns a fixture of ProfileConfigMap with `f5-big-ip` profile diff --git a/pkg/collector/corechecks/snmp/snmp_test.go b/pkg/collector/corechecks/snmp/snmp_test.go index ce0ea597cc889..0306d9889fcc9 100644 --- a/pkg/collector/corechecks/snmp/snmp_test.go +++ b/pkg/collector/corechecks/snmp/snmp_test.go @@ -28,7 +28,7 @@ import ( "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" "github.com/DataDog/datadog-agent/pkg/collector/externalhost" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -58,7 +58,7 @@ func createDeps(t *testing.T) deps { func Test_Run_simpleCase(t *testing.T) { // We cache the run_path directory because the chk.Run() method will write in cache testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) deps := createDeps(t) profile.SetConfdPathAndCleanProfiles() sess := session.CreateMockSession() @@ -339,7 +339,7 @@ tags: func Test_Run_customIfSpeed(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) report.TimeNow = common.MockTimeNow deps := createDeps(t) profile.SetConfdPathAndCleanProfiles() @@ -485,7 +485,7 @@ metrics: func TestSupportedMetricTypes(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) profile.SetConfdPathAndCleanProfiles() sess := session.CreateMockSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { @@ -560,7 +560,7 @@ metrics: func TestProfile(t 
*testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) timeNow = common.MockTimeNow deps := createDeps(t) @@ -958,7 +958,7 @@ profiles: func TestServiceCheckFailures(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) profile.SetConfdPathAndCleanProfiles() sess := session.CreateMockSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { @@ -1041,7 +1041,7 @@ namespace: nsSubnet func TestCheck_Run(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) sysObjectIDPacketInvalidSysObjectIDMock := gosnmp.SnmpPacket{ Variables: []gosnmp.SnmpPDU{ { @@ -1259,7 +1259,7 @@ namespace: '%s' func TestCheck_Run_sessionCloseError(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) profile.SetConfdPathAndCleanProfiles() sess := session.CreateMockSession() @@ -1305,7 +1305,7 @@ metrics: func TestReportDeviceMetadataEvenOnProfileError(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) timeNow = common.MockTimeNow @@ -1618,7 +1618,7 @@ tags: func TestReportDeviceMetadataWithFetchError(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) timeNow = common.MockTimeNow deps := createDeps(t) senderManager := deps.Demultiplexer @@ -1732,7 +1732,7 @@ tags: func TestDiscovery(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) deps := createDeps(t) timeNow = common.MockTimeNow profile.SetConfdPathAndCleanProfiles() @@ -2078,7 +2078,7 @@ metric_tags: func TestDiscovery_CheckError(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) deps := createDeps(t) profile.SetConfdPathAndCleanProfiles() @@ -2156,7 +2156,7 @@ metric_tags: func TestDeviceIDAsHostname(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) deps := createDeps(t) cache.Cache.Delete(cache.BuildAgentKey("hostname")) // clean existing hostname cache @@ -2166,8 +2166,8 @@ func TestDeviceIDAsHostname(t *testing.T) { return sess, nil } chk := Check{sessionFactory: sessionFactory} - coreconfig.Datadog().SetWithoutSource("hostname", "test-hostname") - coreconfig.Datadog().SetWithoutSource("tags", []string{"agent_tag1:val1", "agent_tag2:val2"}) + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "test-hostname") + pkgconfigsetup.Datadog().SetWithoutSource("tags", []string{"agent_tag1:val1", "agent_tag2:val2"}) senderManager := deps.Demultiplexer // language=yaml @@ -2349,7 +2349,7 @@ use_device_id_as_hostname: true func TestDiscoveryDeviceIDAsHostname(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) deps := createDeps(t) 
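The same substitution repeats mechanically across these test files; a self-contained sketch of the new pattern (hypothetical test name, import path taken verbatim from the hunks above):

	import (
		"testing"

		pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
	)

	func TestWithOverriddenConfig(t *testing.T) {
		testDir := t.TempDir()
		// The config singleton now comes from pkg/config/setup rather than pkg/config.
		pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir)
		pkgconfigsetup.Datadog().SetWithoutSource("hostname", "test-hostname")
	}
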
cache.Cache.Delete(cache.BuildAgentKey("hostname")) // clean existing hostname cache timeNow = common.MockTimeNow @@ -2360,7 +2360,7 @@ func TestDiscoveryDeviceIDAsHostname(t *testing.T) { } chk := Check{sessionFactory: sessionFactory} - coreconfig.Datadog().SetWithoutSource("hostname", "my-hostname") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "my-hostname") senderManager := deps.Demultiplexer // language=yaml diff --git a/pkg/collector/corechecks/system/cpu/cpu/cpu_ctx_switches_linux.go b/pkg/collector/corechecks/system/cpu/cpu/cpu_ctx_switches_linux.go index 6fce0b646f394..97dd5eab205e5 100644 --- a/pkg/collector/corechecks/system/cpu/cpu/cpu_ctx_switches_linux.go +++ b/pkg/collector/corechecks/system/cpu/cpu/cpu_ctx_switches_linux.go @@ -15,7 +15,7 @@ import ( "strings" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func readCtxSwitches(procStatPath string) (ctxSwitches int64, err error) { @@ -43,8 +43,8 @@ func readCtxSwitches(procStatPath string) (ctxSwitches int64, err error) { func collectCtxSwitches(sender sender.Sender) error { procfsPath := "/proc" - if config.Datadog().IsSet("procfs_path") { - procfsPath = config.Datadog().GetString("procfs_path") + if pkgconfigsetup.Datadog().IsSet("procfs_path") { + procfsPath = pkgconfigsetup.Datadog().GetString("procfs_path") } ctxSwitches, err := readCtxSwitches(filepath.Join(procfsPath, "/stat")) if err != nil { diff --git a/pkg/collector/corechecks/system/cpu/cpu/cpu_test.go b/pkg/collector/corechecks/system/cpu/cpu/cpu_test.go index cd1ffec2b0863..d5124637d6c4a 100644 --- a/pkg/collector/corechecks/system/cpu/cpu/cpu_test.go +++ b/pkg/collector/corechecks/system/cpu/cpu/cpu_test.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/shirou/gopsutil/v3/cpu" @@ -174,7 +174,7 @@ func TestCPUCheckLinuxErrorStoppedSender(t *testing.T) { } func TestCPUCheckLinuxErrorProcFsPathNoExists(t *testing.T) { - config.Datadog().SetDefault("procfs_path", "/tmp") + pkgconfigsetup.Datadog().SetDefault("procfs_path", "/tmp") cpuInfoFunc = func() ([]cpu.InfoStat, error) { return cpuInfo, nil } @@ -201,7 +201,7 @@ func TestCPUCheckLinuxErrorProcFsPathEmptyFile(t *testing.T) { t.Fatal("Error creating temporary file:", err) } defer os.Remove(tempFile.Name()) - config.Datadog().SetDefault("procfs_path", os.TempDir()) + pkgconfigsetup.Datadog().SetDefault("procfs_path", os.TempDir()) cpuInfoFunc = func() ([]cpu.InfoStat, error) { return cpuInfo, nil } @@ -232,7 +232,7 @@ func TestCPUCheckLinuxErrorProcFsPathWrongFormat(t *testing.T) { if err != nil { t.Fatal("Error writing to temporary file:", err) } - config.Datadog().SetDefault("procfs_path", os.TempDir()) + pkgconfigsetup.Datadog().SetDefault("procfs_path", os.TempDir()) cpuInfoFunc = func() ([]cpu.InfoStat, error) { return cpuInfo, nil } diff --git a/pkg/collector/corechecks/system/wincrashdetect/probe/crashparse.go b/pkg/collector/corechecks/system/wincrashdetect/probe/crashparse.go index 40fa3c04cb655..c4fb188d2e5d2 100644 --- a/pkg/collector/corechecks/system/wincrashdetect/probe/crashparse.go +++ 
b/pkg/collector/corechecks/system/wincrashdetect/probe/crashparse.go @@ -5,6 +5,7 @@ //go:build windows +// Package probe parses Windows crash dumps. package probe /* diff --git a/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect_windows_test.go b/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect_windows_test.go index b4a7de8bfba57..ffb9ce3647dbc 100644 --- a/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect_windows_test.go +++ b/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect_windows_test.go @@ -8,7 +8,6 @@ package wincrashdetect import ( - "fmt" "net" "net/http" @@ -21,21 +20,34 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/utils" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/system/wincrashdetect/probe" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/event" - //process_net "github.com/DataDog/datadog-agent/pkg/process/net" + process_net "github.com/DataDog/datadog-agent/pkg/process/net" "golang.org/x/sys/windows/registry" ) +const ( + // systemProbeTestPipeName is the test named pipe for system-probe + systemProbeTestPipeName = `\\.\pipe\dd_system_probe_wincrash_test` + + // systemProbeTestPipeSecurityDescriptor has a DACL that allows Everyone access for these tests. + systemProbeTestPipeSecurityDescriptor = "D:PAI(A;;FA;;;WD)" +) + func createSystemProbeListener() (l net.Listener, close func()) { - l, err := net.Listen("tcp", "127.0.0.1:0") + process_net.OverrideSystemProbeNamedPipeConfig( + systemProbeTestPipeName, + systemProbeTestPipeSecurityDescriptor) + + // No socket address. Windows uses a fixed named pipe + conn, err := process_net.NewSystemProbeListener("") if err != nil { panic(err) } - return l, func() { - _ = l.Close() + return conn.GetListener(), func() { + _ = conn.GetListener().Close() } } @@ -61,7 +73,7 @@ func TestWinCrashReporting(t *testing.T) { listener, closefunc := createSystemProbeListener() defer closefunc() - config.InitSystemProbeConfig(config.SystemProbe()) + pkgconfigsetup.InitSystemProbeConfig(pkgconfigsetup.SystemProbe()) mux := http.NewServeMux() server := http.Server{ @@ -69,8 +81,8 @@ } defer server.Close() - sock := fmt.Sprintf("localhost:%d", listener.Addr().(*net.TCPAddr).Port) - config.SystemProbe().SetWithoutSource("system_probe_config.sysprobe_socket", sock) + // no socket address is set in config for Windows since system probe + // utilizes a fixed named pipe. /* * the underlying system probe connector is a singleton. 
Therefore, we can't set up different diff --git a/pkg/collector/corechecks/telemetry/check.go b/pkg/collector/corechecks/telemetry/check.go index 91ca99075be10..34299831a4412 100644 --- a/pkg/collector/corechecks/telemetry/check.go +++ b/pkg/collector/corechecks/telemetry/check.go @@ -11,13 +11,14 @@ import ( "fmt" "strings" - "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" + dto "github.com/prometheus/client_model/go" + + "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" - dto "github.com/prometheus/client_model/go" ) const ( @@ -28,10 +29,11 @@ const ( type checkImpl struct { corechecks.CheckBase + telemetry telemetry.Component } func (c *checkImpl) Run() error { - mfs, err := telemetryimpl.GetCompatComponent().Gather(true) + mfs, err := c.telemetry.Gather(true) if err != nil { return err } @@ -105,12 +107,11 @@ func (c *checkImpl) buildTags(lps []*dto.LabelPair) []string { } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) -} - -func newCheck() check.Check { - return &checkImpl{ - CheckBase: corechecks.NewCheckBase(CheckName), - } +func Factory(telemetry telemetry.Component) optional.Option[func() check.Check] { + return optional.NewOption(func() check.Check { + return &checkImpl{ + CheckBase: corechecks.NewCheckBase(CheckName), + telemetry: telemetry, + } + }) } diff --git a/pkg/collector/embed_python.go b/pkg/collector/embed_python.go index b8f35b2722b88..c20edcf3b7ae7 100644 --- a/pkg/collector/embed_python.go +++ b/pkg/collector/embed_python.go @@ -9,7 +9,7 @@ package collector import ( "github.com/DataDog/datadog-agent/pkg/collector/python" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -38,8 +38,8 @@ func pySetup(paths ...string) (pythonVersion, pythonHome, pythonPath string) { } func pyPrepareEnv() error { - if config.Datadog().IsSet("procfs_path") { - procfsPath := config.Datadog().GetString("procfs_path") + if pkgconfigsetup.Datadog().IsSet("procfs_path") { + procfsPath := pkgconfigsetup.Datadog().GetString("procfs_path") return python.SetPythonPsutilProcPath(procfsPath) } return nil diff --git a/pkg/collector/python/check.go b/pkg/collector/python/check.go index 304014df9ea49..a1e1c1d7644ae 100644 --- a/pkg/collector/python/check.go +++ b/pkg/collector/python/check.go @@ -26,7 +26,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check/defaults" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" "github.com/DataDog/datadog-agent/pkg/collector/check/stats" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -81,7 +81,7 @@ func NewPythonCheck(senderManager sender.SenderManager, name string, class *C.rt class: class, interval: defaults.DefaultCheckInterval, lastWarnings: []error{}, - telemetry: utils.IsCheckTelemetryEnabled(name, config.Datadog()), + telemetry: utils.IsCheckTelemetryEnabled(name, pkgconfigsetup.Datadog()), } 
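Looking back at the telemetry check hunk above: Factory now takes the telemetry component as an argument instead of fetching the global compat component, so callers inject it at registration time. The wiring appears verbatim in pkg/commonchecks/corechecks.go further down in this diff:

	// telemetry here is the comp/core/telemetry.Component passed into RegisterChecks.
	corecheckLoader.RegisterCheck(telemetryCheck.CheckName, telemetryCheck.Factory(telemetry))
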
runtime.SetFinalizer(pyCheck, pythonCheckFinalizer) @@ -308,7 +308,7 @@ func (c *PythonCheck) Configure(senderManager sender.SenderManager, integrationC log.Warnf("could not get a '%s' check instance with the new api: %s", c.ModuleName, rtLoaderError) log.Warn("trying to instantiate the check with the old api, passing agentConfig to the constructor") - allSettings := config.Datadog().AllSettings() + allSettings := pkgconfigsetup.Datadog().AllSettings() agentConfig, err := yaml.Marshal(allSettings) if err != nil { log.Errorf("error serializing agent config: %s", err) diff --git a/pkg/collector/python/datadog_agent.go b/pkg/collector/python/datadog_agent.go index f9c3c52b2493a..1c1cf0fefd140 100644 --- a/pkg/collector/python/datadog_agent.go +++ b/pkg/collector/python/datadog_agent.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/hosttags" "github.com/DataDog/datadog-agent/pkg/collector/check" "github.com/DataDog/datadog-agent/pkg/collector/externalhost" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/obfuscate" "github.com/DataDog/datadog-agent/pkg/persistentcache" "github.com/DataDog/datadog-agent/pkg/util" @@ -63,7 +63,7 @@ func GetHostname(hostname **C.char) { // //export GetHostTags func GetHostTags(hostTags **C.char) { - tags := hosttags.Get(context.Background(), true, config.Datadog()) + tags := hosttags.Get(context.Background(), true, pkgconfigsetup.Datadog()) tagsBytes, err := json.Marshal(tags) if err != nil { log.Warnf("Error getting host tags: %v. Invalid tags: %v", err, tags) @@ -85,7 +85,7 @@ func GetClusterName(clusterName **C.char) { // //export TracemallocEnabled func TracemallocEnabled() C.bool { - return C.bool(config.Datadog().GetBool("tracemalloc_debug")) + return C.bool(pkgconfigsetup.Datadog().GetBool("tracemalloc_debug")) } // Headers returns a basic set of HTTP headers that can be used by clients in Python checks. @@ -110,12 +110,12 @@ func Headers(yamlPayload **C.char) { //export GetConfig func GetConfig(key *C.char, yamlPayload **C.char) { goKey := C.GoString(key) - if !config.Datadog().IsSet(goKey) { + if !pkgconfigsetup.Datadog().IsSet(goKey) { *yamlPayload = nil return } - value := config.Datadog().Get(goKey) + value := pkgconfigsetup.Datadog().Get(goKey) data, err := yaml.Marshal(value) if err != nil { log.Errorf("could not convert configuration value '%v' to YAML: %s", value, err) @@ -245,12 +245,12 @@ var ( ) // lazyInitObfuscator initializes the obfuscator the first time it is used. We can't initialize during the package init -// because the obfuscator depends on config.Datadog and it isn't guaranteed to be initialized during package init, but +// because the obfuscator depends on pkgconfigsetup.Datadog and it isn't guaranteed to be initialized during package init, but // will definitely be initialized by the time one of the python checks runs func lazyInitObfuscator() *obfuscate.Obfuscator { obfuscatorLoader.Do(func() { var cfg obfuscate.Config - if err := config.Datadog().UnmarshalKey("apm_config.obfuscation", &cfg); err != nil { + if err := pkgconfigsetup.Datadog().UnmarshalKey("apm_config.obfuscation", &cfg); err != nil { log.Errorf("Failed to unmarshal apm_config.obfuscation: %s", err.Error()) cfg = obfuscate.Config{} } @@ -320,6 +320,11 @@ type sqlConfig struct { // By default, identifier quotation is removed during normalization. 
// This option is only valid when ObfuscationMode is "normalize_only" or "obfuscate_and_normalize". KeepIdentifierQuotation bool `json:"keep_identifier_quotation"` + + // KeepJSONPath specifies whether to keep JSON paths following JSON operators in SQL statements in obfuscation. + // By default, JSON paths are treated as literals and are obfuscated to ?, e.g. "data::jsonb -> 'name'" -> "data::jsonb -> ?". + // This option is only valid when ObfuscationMode is "normalize_only" or "obfuscate_and_normalize". + KeepJSONPath bool `json:"keep_json_path" yaml:"keep_json_path"` } // ObfuscateSQL obfuscates & normalizes the provided SQL query, writing the error into errResult if the operation @@ -354,6 +359,7 @@ func ObfuscateSQL(rawQuery, opts *C.char, errResult **C.char) *C.char { KeepPositionalParameter: sqlOpts.KeepPositionalParameter, KeepTrailingSemicolon: sqlOpts.KeepTrailingSemicolon, KeepIdentifierQuotation: sqlOpts.KeepIdentifierQuotation, + KeepJSONPath: sqlOpts.KeepJSONPath, }) if err != nil { // memory will be freed by caller @@ -586,7 +592,7 @@ var defaultMongoObfuscateSettings = obfuscate.JSONConfig{ //export getProcessStartTime func getProcessStartTime() float64 { - return float64(config.StartTime.Unix()) + return float64(pkgconfigsetup.StartTime.Unix()) } // ObfuscateMongoDBString obfuscates the MongoDB query diff --git a/pkg/collector/python/init.go b/pkg/collector/python/init.go index 5b905debba7ba..04217d158e9a4 100644 --- a/pkg/collector/python/init.go +++ b/pkg/collector/python/init.go @@ -21,7 +21,7 @@ import ( "unsafe" "github.com/DataDog/datadog-agent/pkg/aggregator" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/datadog-agent/pkg/telemetry" @@ -376,11 +376,11 @@ func resolvePythonExecPath(pythonVersion string, ignoreErrors bool) (string, err //nolint:revive // TODO(AML) Fix revive linter func Initialize(paths ...string) error { - pythonVersion := config.Datadog().GetString("python_version") - allowPathHeuristicsFailure := config.Datadog().GetBool("allow_python_path_heuristics_failure") + pythonVersion := pkgconfigsetup.Datadog().GetString("python_version") + allowPathHeuristicsFailure := pkgconfigsetup.Datadog().GetBool("allow_python_path_heuristics_failure") // Memory related RTLoader-global initialization - if config.Datadog().GetBool("memtrack_enabled") { + if pkgconfigsetup.Datadog().GetBool("memtrack_enabled") { C.initMemoryTracker() } @@ -426,7 +426,7 @@ func Initialize(paths ...string) error { return err } - if config.Datadog().GetBool("telemetry.enabled") && config.Datadog().GetBool("telemetry.python_memory") { + if pkgconfigsetup.Datadog().GetBool("telemetry.enabled") && pkgconfigsetup.Datadog().GetBool("telemetry.python_memory") { initPymemTelemetry() } diff --git a/pkg/collector/python/init_nix.go b/pkg/collector/python/init_nix.go index ddfee4f741edd..52cd96fb9657d 100644 --- a/pkg/collector/python/init_nix.go +++ b/pkg/collector/python/init_nix.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) /* @@ -26,10 +26,10 @@ import "C" // Any platform-specific initialization belongs here. 
func initializePlatform() error { // Setup crash handling specifics - *NIX-only - if config.Datadog().GetBool("c_stacktrace_collection") { + if pkgconfigsetup.Datadog().GetBool("c_stacktrace_collection") { var cCoreDump int - if config.Datadog().GetBool("c_core_dump") { + if pkgconfigsetup.Datadog().GetBool("c_core_dump") { cCoreDump = 1 } diff --git a/pkg/collector/python/init_windows.go b/pkg/collector/python/init_windows.go index 48a31a741125d..04d18175df1f7 100644 --- a/pkg/collector/python/init_windows.go +++ b/pkg/collector/python/init_windows.go @@ -10,14 +10,14 @@ package python import ( "os" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // Any platform-specific initialization belongs here. func initializePlatform() error { // On Windows, it's not uncommon to have a system-wide PYTHONPATH env var set. // Unset it, so our embedded python doesn't try to load things from the system. - if !config.Datadog().GetBool("windows_use_pythonpath") { + if !pkgconfigsetup.Datadog().GetBool("windows_use_pythonpath") { os.Unsetenv("PYTHONPATH") } diff --git a/pkg/collector/python/loader.go b/pkg/collector/python/loader.go index 969a3e10c07f9..66df21aaf6944 100644 --- a/pkg/collector/python/loader.go +++ b/pkg/collector/python/loader.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" "github.com/DataDog/datadog-agent/pkg/collector/loaders" - agentConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -129,7 +129,7 @@ func (cl *PythonCheckLoader) Load(senderManager sender.SenderManager, config int defer glock.unlock() // Platform-specific preparation - if !agentConfig.Datadog().GetBool("win_skip_com_init") { + if !pkgconfigsetup.Datadog().GetBool("win_skip_com_init") { log.Debugf("Performing platform loading prep") err = platformLoaderPrep() if err != nil { @@ -186,7 +186,7 @@ func (cl *PythonCheckLoader) Load(senderManager sender.SenderManager, config int log.Debugf("python check '%s' doesn't have a '__version__' attribute: %s", config.Name, getRtLoaderError()) } - if !agentConfig.Datadog().GetBool("disable_py3_validation") && !loadedAsWheel { + if !pkgconfigsetup.Datadog().GetBool("disable_py3_validation") && !loadedAsWheel { // Customers, though unlikely might version their custom checks. 
// Let's use the module namespace to try to decide if this was a // custom check, check for py3 compatibility @@ -288,7 +288,7 @@ func reportPy3Warnings(checkName string, checkFilePath string) { checkFilePath = checkFilePath[:len(checkFilePath)-1] } - if strings.TrimSpace(agentConfig.Datadog().GetString("python_version")) == "3" { + if strings.TrimSpace(pkgconfigsetup.Datadog().GetString("python_version")) == "3" { // the linter used by validatePython3 doesn't work when run from python3 status = a7TagPython3 metricValue = 1.0 diff --git a/pkg/collector/python/memory.go b/pkg/collector/python/memory.go index 5d6fdb403de62..38060ef37b462 100644 --- a/pkg/collector/python/memory.go +++ b/pkg/collector/python/memory.go @@ -16,7 +16,7 @@ import ( "github.com/cihub/seelog" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -118,7 +118,7 @@ func TrackedCString(str string) *C.char { cstr := C.CString(str) // TODO(memory-tracking): track the origin of the string (for example check name) - if config.Datadog().GetBool("memtrack_enabled") { + if pkgconfigsetup.Datadog().GetBool("memtrack_enabled") { MemoryTracker(unsafe.Pointer(cstr), C.size_t(len(str)+1), C.DATADOG_AGENT_RTLOADER_ALLOCATION) } diff --git a/pkg/collector/python/py3_checker.go b/pkg/collector/python/py3_checker.go index a72e31463b3c9..131dfb73ee7c5 100644 --- a/pkg/collector/python/py3_checker.go +++ b/pkg/collector/python/py3_checker.go @@ -16,11 +16,11 @@ import ( "path/filepath" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) var ( - linterTimeout = time.Duration(config.Datadog().GetInt("python3_linter_timeout")) * time.Second + linterTimeout = time.Duration(pkgconfigsetup.Datadog().GetInt("python3_linter_timeout")) * time.Second ) type warning struct { diff --git a/pkg/collector/runner/runner.go b/pkg/collector/runner/runner.go index e8be2f5cef27a..e6ba495f02f2b 100644 --- a/pkg/collector/runner/runner.go +++ b/pkg/collector/runner/runner.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/runner/tracker" "github.com/DataDog/datadog-agent/pkg/collector/scheduler" "github.com/DataDog/datadog-agent/pkg/collector/worker" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -53,7 +53,7 @@ type Runner struct { // NewRunner takes the number of desired goroutines processing incoming checks. 
func NewRunner(senderManager sender.SenderManager) *Runner { - numWorkers := config.Datadog().GetInt("check_runners") + numWorkers := pkgconfigsetup.Datadog().GetInt("check_runners") r := &Runner{ senderManager: senderManager, @@ -66,7 +66,7 @@ func NewRunner(senderManager sender.SenderManager) *Runner { } if !r.isStaticWorkerCount { - numWorkers = config.DefaultNumWorkers + numWorkers = pkgconfigsetup.DefaultNumWorkers } r.ensureMinWorkers(numWorkers) @@ -164,7 +164,7 @@ func (r *Runner) UpdateNumWorkers(numChecks int64) { case numChecks <= 25: desiredNumWorkers = 20 default: - desiredNumWorkers = config.MaxNumWorkers + desiredNumWorkers = pkgconfigsetup.MaxNumWorkers } r.ensureMinWorkers(desiredNumWorkers) diff --git a/pkg/collector/runner/runner_test.go b/pkg/collector/runner/runner_test.go index 5771b886e0ba7..6ae3cd335c81b 100644 --- a/pkg/collector/runner/runner_test.go +++ b/pkg/collector/runner/runner_test.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check/stub" "github.com/DataDog/datadog-agent/pkg/collector/runner/expvars" "github.com/DataDog/datadog-agent/pkg/collector/scheduler" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // Fixtures @@ -143,14 +143,14 @@ func assertAsyncBool(t *testing.T, actualValueFunc func() bool, expectedValue bo func testSetUp(t *testing.T) { assertAsyncWorkerCount(t, 0) expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") } // Tests func TestNewRunner(t *testing.T) { testSetUp(t) - config.Datadog().SetWithoutSource("check_runners", "3") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "3") r := NewRunner(aggregator.NewNoOpSenderManager()) require.NotNil(t, r) @@ -164,7 +164,7 @@ func TestNewRunner(t *testing.T) { func TestRunnerAddWorker(t *testing.T) { testSetUp(t) - config.Datadog().SetWithoutSource("check_runners", "1") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "1") r := NewRunner(aggregator.NewNoOpSenderManager()) require.NotNil(t, r) @@ -179,7 +179,7 @@ func TestRunnerAddWorker(t *testing.T) { func TestRunnerStaticUpdateNumWorkers(t *testing.T) { testSetUp(t) - config.Datadog().SetWithoutSource("check_runners", "2") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "2") r := NewRunner(aggregator.NewNoOpSenderManager()) require.NotNil(t, r) @@ -198,14 +198,14 @@ func TestRunnerStaticUpdateNumWorkers(t *testing.T) { func TestRunnerDynamicUpdateNumWorkers(t *testing.T) { testSetUp(t) - config.Datadog().SetWithoutSource("check_runners", "0") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "0") testCases := [][]int{ {0, 10, 4}, {11, 15, 10}, {16, 20, 15}, {21, 25, 20}, - {26, 35, config.MaxNumWorkers}, + {26, 35, pkgconfigsetup.MaxNumWorkers}, } for _, testCase := range testCases { @@ -251,7 +251,7 @@ func TestRunner(t *testing.T) { func TestRunnerStop(t *testing.T) { testSetUp(t) - config.Datadog().SetWithoutSource("check_runners", "10") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "10") numChecks := 8 checks := make([]*testCheck, numChecks) @@ -304,7 +304,7 @@ func TestRunnerStop(t *testing.T) { func TestRunnerStopWithStuckCheck(t *testing.T) { testSetUp(t) - config.Datadog().SetWithoutSource("check_runners", "10") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "10") numChecks := 8 checks := make([]*testCheck, numChecks) @@ -360,7 +360,7 @@ func TestRunnerStopWithStuckCheck(t 
*testing.T) { func TestRunnerStopCheck(t *testing.T) { testSetUp(t) - config.Datadog().SetWithoutSource("check_runners", "3") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "3") testCheck := newCheck(t, "mycheck:123", false, nil) blockedCheck := newCheck(t, "mycheck2:123", false, nil) @@ -408,7 +408,7 @@ func TestRunnerStopCheck(t *testing.T) { func TestRunnerScheduler(t *testing.T) { testSetUp(t) - config.Datadog().SetWithoutSource("check_runners", "3") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "3") sched1 := newScheduler() sched2 := newScheduler() @@ -428,7 +428,7 @@ func TestRunnerScheduler(t *testing.T) { func TestRunnerShouldAddCheckStats(t *testing.T) { testSetUp(t) - config.Datadog().SetWithoutSource("check_runners", "3") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "3") testCheck := newCheck(t, "test", false, nil) sched := newScheduler() diff --git a/pkg/collector/worker/check_logger.go b/pkg/collector/worker/check_logger.go index 6ce024c3b20ea..c26cb4e6ef2f3 100644 --- a/pkg/collector/worker/check_logger.go +++ b/pkg/collector/worker/check_logger.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" "github.com/DataDog/datadog-agent/pkg/collector/runner/expvars" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -52,7 +52,7 @@ func (cl *CheckLogger) CheckFinished() { if cl.lastVerboseLog { message += fmt.Sprintf( ", next runs will be logged every %v runs", - config.Datadog().GetInt64(loggingFrequencyConfigKey), + pkgconfigsetup.Datadog().GetInt64(loggingFrequencyConfigKey), ) } @@ -79,7 +79,7 @@ func (cl *CheckLogger) Debug(message string) { // shouldLogCheck returns if we should log the check start/stop message with higher // verbosity and if this is the end of the initial series of check log statements func shouldLogCheck(id checkid.ID) (shouldLog, lastVerboseLog bool) { - loggingFrequency := uint64(config.Datadog().GetInt64(loggingFrequencyConfigKey)) + loggingFrequency := uint64(pkgconfigsetup.Datadog().GetInt64(loggingFrequencyConfigKey)) // If this is the first time we see the check, log it stats, idFound := expvars.CheckStats(id) diff --git a/pkg/collector/worker/check_logger_test.go b/pkg/collector/worker/check_logger_test.go index 9c0431dd472ec..41ad99b777f89 100644 --- a/pkg/collector/worker/check_logger_test.go +++ b/pkg/collector/worker/check_logger_test.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check/stats" "github.com/DataDog/datadog-agent/pkg/collector/check/stub" "github.com/DataDog/datadog-agent/pkg/collector/runner/expvars" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) type stubCheck struct { @@ -36,7 +36,7 @@ func addExpvarsCheckStats(c check.Check) { } func setUp() { - config.Datadog().SetWithoutSource(loggingFrequencyConfigKey, "20") + pkgconfigsetup.Datadog().SetWithoutSource(loggingFrequencyConfigKey, "20") expvars.Reset() } diff --git a/pkg/collector/worker/worker.go b/pkg/collector/worker/worker.go index da60890471f06..7a366434e317a 100644 --- a/pkg/collector/worker/worker.go +++ b/pkg/collector/worker/worker.go @@ -15,7 +15,7 @@ import ( checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" "github.com/DataDog/datadog-agent/pkg/collector/runner/expvars" 
"github.com/DataDog/datadog-agent/pkg/collector/runner/tracker" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -179,7 +179,7 @@ func (w *Worker) Run() { } if sender != nil && !longRunning { - if config.Datadog().GetBool("integration_check_status_enabled") { + if pkgconfigsetup.Datadog().GetBool("integration_check_status_enabled") { sender.ServiceCheck(serviceCheckStatusKey, serviceCheckStatus, hname, serviceCheckTags, "") } // FIXME(remy): this `Commit()` should be part of the `if` above, we keep diff --git a/pkg/collector/worker/worker_test.go b/pkg/collector/worker/worker_test.go index c31e4613be7ae..129ed30a499e8 100644 --- a/pkg/collector/worker/worker_test.go +++ b/pkg/collector/worker/worker_test.go @@ -24,7 +24,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check/stub" "github.com/DataDog/datadog-agent/pkg/collector/runner/expvars" "github.com/DataDog/datadog-agent/pkg/collector/runner/tracker" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" ) @@ -182,7 +182,7 @@ func TestWorkerName(t *testing.T) { func TestWorker(t *testing.T) { expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") var wg sync.WaitGroup @@ -258,7 +258,7 @@ func TestWorker(t *testing.T) { func TestWorkerUtilizationExpvars(t *testing.T) { expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") var wg sync.WaitGroup @@ -328,7 +328,7 @@ func TestWorkerUtilizationExpvars(t *testing.T) { func TestWorkerErrorAndWarningHandling(t *testing.T) { expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") var wg sync.WaitGroup @@ -336,7 +336,7 @@ func TestWorkerErrorAndWarningHandling(t *testing.T) { pendingChecksChan := make(chan check.Check, 10) mockShouldAddStatsFunc := func(checkid.ID) bool { return true } - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") testCheck1 := newCheck(t, "testing:123", true, nil) testCheck2 := newCheck(t, "testing2:234", true, nil) @@ -383,13 +383,13 @@ func TestWorkerErrorAndWarningHandling(t *testing.T) { func TestWorkerConcurrentCheckScheduling(t *testing.T) { expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") checksTracker := tracker.NewRunningChecksTracker() pendingChecksChan := make(chan check.Check, 10) mockShouldAddStatsFunc := func(checkid.ID) bool { return true } - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") testCheck := newCheck(t, "testing:123", true, nil) @@ -412,7 +412,7 @@ func TestWorkerConcurrentCheckScheduling(t *testing.T) { func TestWorkerStatsAddition(t *testing.T) { expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") checksTracker := tracker.NewRunningChecksTracker() pendingChecksChan := make(chan check.Check, 10) @@ -421,7 +421,7 @@ 
func TestWorkerStatsAddition(t *testing.T) { return string(id) != "squelched:123" } - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") longRunningCheckNoErrorNoWarning := &testCheck{ t: t, @@ -471,8 +471,8 @@ func TestWorkerStatsAddition(t *testing.T) { func TestWorkerServiceCheckSending(t *testing.T) { expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") - config.Datadog().SetWithoutSource("integration_check_status_enabled", "true") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("integration_check_status_enabled", "true") var wg sync.WaitGroup @@ -557,7 +557,7 @@ func TestWorkerServiceCheckSending(t *testing.T) { func TestWorkerSenderNil(t *testing.T) { expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") checksTracker := tracker.NewRunningChecksTracker() pendingChecksChan := make(chan check.Check, 10) @@ -588,7 +588,7 @@ func TestWorkerSenderNil(t *testing.T) { func TestWorkerServiceCheckSendingLongRunningTasks(t *testing.T) { expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") checksTracker := tracker.NewRunningChecksTracker() pendingChecksChan := make(chan check.Check, 10) diff --git a/pkg/commonchecks/corechecks.go b/pkg/commonchecks/corechecks.go index 4bc7dd5649d2d..9cebc3657119a 100644 --- a/pkg/commonchecks/corechecks.go +++ b/pkg/commonchecks/corechecks.go @@ -58,7 +58,7 @@ func RegisterChecks(store workloadmeta.Component, cfg config.Component, telemetr corecheckLoader.RegisterCheck(cpu.CheckName, cpu.Factory()) corecheckLoader.RegisterCheck(memory.CheckName, memory.Factory()) corecheckLoader.RegisterCheck(uptime.CheckName, uptime.Factory()) - corecheckLoader.RegisterCheck(telemetryCheck.CheckName, telemetryCheck.Factory()) + corecheckLoader.RegisterCheck(telemetryCheck.CheckName, telemetryCheck.Factory(telemetry)) corecheckLoader.RegisterCheck(ntp.CheckName, ntp.Factory()) corecheckLoader.RegisterCheck(snmp.CheckName, snmp.Factory()) corecheckLoader.RegisterCheck(networkpath.CheckName, networkpath.Factory(telemetry)) diff --git a/pkg/compliance/agent.go b/pkg/compliance/agent.go index d7d2dfb9acae6..1c73f1b3f5321 100644 --- a/pkg/compliance/agent.go +++ b/pkg/compliance/agent.go @@ -23,19 +23,20 @@ import ( "sync" "time" + "github.com/shirou/gopsutil/v3/process" + workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/compliance/aptconfig" "github.com/DataDog/datadog-agent/pkg/compliance/dbconfig" "github.com/DataDog/datadog-agent/pkg/compliance/k8sconfig" "github.com/DataDog/datadog-agent/pkg/compliance/metrics" "github.com/DataDog/datadog-agent/pkg/compliance/utils" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/rules" secl "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/security/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/shirou/gopsutil/v3/process" ) const containersCountMetricName = "datadog.security_agent.compliance.containers_running" @@ -129,7 +130,7 @@ type Agent struct { } func xccdfEnabled() bool { - return 
config.Datadog().GetBool("compliance_config.xccdf.enabled") || config.Datadog().GetBool("compliance_config.host_benchmarks.enabled") + return pkgconfigsetup.Datadog().GetBool("compliance_config.xccdf.enabled") || pkgconfigsetup.Datadog().GetBool("compliance_config.host_benchmarks.enabled") } // DefaultRuleFilter implements the default filtering of benchmarks' rules. It diff --git a/pkg/compliance/reporter.go b/pkg/compliance/reporter.go index 2bbf3a39f4c27..501ca6f10bc43 100644 --- a/pkg/compliance/reporter.go +++ b/pkg/compliance/reporter.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/agentimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/client" @@ -44,7 +44,7 @@ func NewLogReporter(hostname string, sourceName, sourceType string, endpoints *c auditor.Start() // setup the pipeline provider that provides pairs of processor and sender - pipelineProvider := pipeline.NewProvider(config.NumberOfPipelines, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, dstcontext, agentimpl.NewStatusProvider(), hostnameimpl.NewHostnameService(), coreconfig.Datadog()) + pipelineProvider := pipeline.NewProvider(config.NumberOfPipelines, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, dstcontext, agentimpl.NewStatusProvider(), hostnameimpl.NewHostnameService(), pkgconfigsetup.Datadog()) pipelineProvider.Start() logSource := sources.NewLogSource( @@ -62,7 +62,7 @@ func NewLogReporter(hostname string, sourceName, sourceType string, endpoints *c } // merge tags from config - for _, tag := range configUtils.GetConfiguredTags(coreconfig.Datadog(), true) { + for _, tag := range configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), true) { if strings.HasPrefix(tag, "host") { continue } diff --git a/pkg/config/aliases.go b/pkg/config/aliases.go deleted file mode 100644 index 8114edc845796..0000000000000 --- a/pkg/config/aliases.go +++ /dev/null @@ -1,217 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -// Package config defines the configuration of the agent -package config - -import ( - "context" - - slog "github.com/cihub/seelog" - - "github.com/DataDog/datadog-agent/comp/core/secrets" - "github.com/DataDog/datadog-agent/pkg/config/env" - "github.com/DataDog/datadog-agent/pkg/config/model" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" - "github.com/DataDog/datadog-agent/pkg/util/optional" -) - -// Aliases to conf package -type ( - // Proxy alias to model.Proxy - Proxy = model.Proxy - // Reader is alias to model.Reader - Reader = model.Reader - // Writer is alias to model.Reader - Writer = model.Writer - // ReaderWriter is alias to model.ReaderWriter - ReaderWriter = model.ReaderWriter - // Config is alias to model.Config - Config = model.Config -) - -// NewConfig is alias for Config object. 
-var NewConfig = model.NewConfig - -// Warnings represent the warnings in the config -type Warnings = model.Warnings - -var ( - // Datadog Alias - Datadog = pkgconfigsetup.Datadog - // SystemProbe Alias - SystemProbe = pkgconfigsetup.SystemProbe -) - -// IsAutoconfigEnabled is alias for model.IsAutoconfigEnabled -func IsAutoconfigEnabled() bool { - return env.IsAutoconfigEnabled(Datadog()) -} - -// Aliases for config overrides -var ( - AddOverride = model.AddOverride - AddOverrides = model.AddOverrides - AddOverrideFunc = model.AddOverrideFunc -) - -// LoggerName Alias -type LoggerName = pkglogsetup.LoggerName - -// Aliases for logs -var ( - NewLogWriter = pkglogsetup.NewLogWriter - NewTLSHandshakeErrorWriter = pkglogsetup.NewTLSHandshakeErrorWriter -) - -// SetupLogger Alias using Datadog config -func SetupLogger(loggerName LoggerName, logLevel, logFile, syslogURI string, syslogRFC, logToConsole, jsonFormat bool) error { - return pkglogsetup.SetupLogger(loggerName, logLevel, logFile, syslogURI, syslogRFC, logToConsole, jsonFormat, Datadog()) -} - -// SetupJMXLogger Alias using Datadog config -func SetupJMXLogger(logFile, syslogURI string, syslogRFC, logToConsole, jsonFormat bool) error { - return pkglogsetup.SetupJMXLogger(logFile, syslogURI, syslogRFC, logToConsole, jsonFormat, Datadog()) -} - -// GetSyslogURI Alias using Datadog config -func GetSyslogURI() string { - return pkglogsetup.GetSyslogURI(Datadog()) -} - -// SetupDogstatsdLogger Alias using Datadog config -func SetupDogstatsdLogger(logFile string) (slog.LoggerInterface, error) { - return pkglogsetup.SetupDogstatsdLogger(logFile, Datadog()) -} - -// IsCloudProviderEnabled Alias using Datadog config -func IsCloudProviderEnabled(cloudProvider string) bool { - return pkgconfigsetup.IsCloudProviderEnabled(cloudProvider, Datadog()) -} - -// GetIPCAddress Alias using Datadog config -func GetIPCAddress() (string, error) { - return pkgconfigsetup.GetIPCAddress(Datadog()) -} - -// Datatype Aliases -const ( - Metrics = pkgconfigsetup.Metrics - Traces = pkgconfigsetup.Traces - Logs = pkgconfigsetup.Logs -) - -// Aliases for config defaults -const ( - DefaultForwarderRecoveryInterval = pkgconfigsetup.DefaultForwarderRecoveryInterval - DefaultAPIKeyValidationInterval = pkgconfigsetup.DefaultAPIKeyValidationInterval - DefaultBatchWait = pkgconfigsetup.DefaultBatchWait - DefaultInputChanSize = pkgconfigsetup.DefaultInputChanSize - DefaultBatchMaxConcurrentSend = pkgconfigsetup.DefaultBatchMaxConcurrentSend - DefaultBatchMaxContentSize = pkgconfigsetup.DefaultBatchMaxContentSize - DefaultLogsSenderBackoffRecoveryInterval = pkgconfigsetup.DefaultLogsSenderBackoffRecoveryInterval - DefaultLogsSenderBackoffMax = pkgconfigsetup.DefaultLogsSenderBackoffMax - DefaultLogsSenderBackoffFactor = pkgconfigsetup.DefaultLogsSenderBackoffFactor - DefaultLogsSenderBackoffBase = pkgconfigsetup.DefaultLogsSenderBackoffBase - DefaultBatchMaxSize = pkgconfigsetup.DefaultBatchMaxSize - DefaultNumWorkers = pkgconfigsetup.DefaultNumWorkers - MaxNumWorkers = pkgconfigsetup.MaxNumWorkers - DefaultSite = pkgconfigsetup.DefaultSite - OTLPTracePort = pkgconfigsetup.OTLPTracePort - DefaultAuditorTTL = pkgconfigsetup.DefaultAuditorTTL - DefaultMaxMessageSizeBytes = pkgconfigsetup.DefaultMaxMessageSizeBytes - DefaultProcessEntityStreamPort = pkgconfigsetup.DefaultProcessEntityStreamPort - DefaultProcessEventsCheckInterval = pkgconfigsetup.DefaultProcessEventsCheckInterval - DefaultProcessEventsMinCheckInterval = pkgconfigsetup.DefaultProcessEventsMinCheckInterval - 
ProcessMaxPerMessageLimit = pkgconfigsetup.ProcessMaxPerMessageLimit - DefaultProcessMaxPerMessage = pkgconfigsetup.DefaultProcessMaxPerMessage - ProcessMaxMessageBytesLimit = pkgconfigsetup.ProcessMaxMessageBytesLimit - DefaultProcessDiscoveryHintFrequency = pkgconfigsetup.DefaultProcessDiscoveryHintFrequency - DefaultProcessMaxMessageBytes = pkgconfigsetup.DefaultProcessMaxMessageBytes - DefaultProcessExpVarPort = pkgconfigsetup.DefaultProcessExpVarPort - DefaultProcessQueueBytes = pkgconfigsetup.DefaultProcessQueueBytes - DefaultProcessQueueSize = pkgconfigsetup.DefaultProcessQueueSize - DefaultProcessRTQueueSize = pkgconfigsetup.DefaultProcessRTQueueSize - DefaultRuntimePoliciesDir = pkgconfigsetup.DefaultRuntimePoliciesDir - DefaultGRPCConnectionTimeoutSecs = pkgconfigsetup.DefaultGRPCConnectionTimeoutSecs - DefaultProcessEndpoint = pkgconfigsetup.DefaultProcessEndpoint - DefaultProcessEventsEndpoint = pkgconfigsetup.DefaultProcessEventsEndpoint -) - -type ( - // ConfigurationProviders Alias - ConfigurationProviders = pkgconfigsetup.ConfigurationProviders - // Listeners Alias - Listeners = pkgconfigsetup.Listeners -) - -// GetObsPipelineURL Alias using Datadog config -func GetObsPipelineURL(datatype pkgconfigsetup.DataType) (string, error) { - return pkgconfigsetup.GetObsPipelineURL(datatype, Datadog()) -} - -// LoadCustom Alias -func LoadCustom(config model.Config, additionalKnownEnvVars []string) error { - return pkgconfigsetup.LoadCustom(config, additionalKnownEnvVars) -} - -// LoadDatadogCustom Alias -func LoadDatadogCustom(config model.Config, origin string, secretResolver optional.Option[secrets.Component], additionalKnownEnvVars []string) (*model.Warnings, error) { - return pkgconfigsetup.LoadDatadogCustom(config, origin, secretResolver, additionalKnownEnvVars) -} - -// GetValidHostAliases Alias using Datadog config -func GetValidHostAliases(ctx context.Context) ([]string, error) { - return pkgconfigsetup.GetValidHostAliases(ctx, Datadog()) -} - -// IsCLCRunner Alias using Datadog config -func IsCLCRunner() bool { - return pkgconfigsetup.IsCLCRunner(Datadog()) -} - -// GetBindHostFromConfig Alias using Datadog config -func GetBindHostFromConfig(config model.Reader) string { - return pkgconfigsetup.GetBindHostFromConfig(config) -} - -// GetBindHost Alias using Datadog config -func GetBindHost() string { - return pkgconfigsetup.GetBindHost(Datadog()) -} - -var ( - // IsRemoteConfigEnabled Alias - IsRemoteConfigEnabled = pkgconfigsetup.IsRemoteConfigEnabled - // StartTime Alias - StartTime = pkgconfigsetup.StartTime - // StandardJMXIntegrations Alias - StandardJMXIntegrations = pkgconfigsetup.StandardJMXIntegrations - // SetupOTLP Alias - SetupOTLP = pkgconfigsetup.OTLP - // InitSystemProbeConfig Alias - InitSystemProbeConfig = pkgconfigsetup.InitSystemProbeConfig - // InitConfig Alias - InitConfig = pkgconfigsetup.InitConfig - - // GetRemoteConfigurationAllowedIntegrations Alias - GetRemoteConfigurationAllowedIntegrations = pkgconfigsetup.GetRemoteConfigurationAllowedIntegrations - // LoadProxyFromEnv Alias - LoadProxyFromEnv = pkgconfigsetup.LoadProxyFromEnv - - // GetIPCPort Alias - GetIPCPort = pkgconfigsetup.GetIPCPort -) - -// LoadWithoutSecret Alias using Datadog config -func LoadWithoutSecret() (*model.Warnings, error) { - return pkgconfigsetup.LoadDatadogCustom(Datadog(), "datadog.yaml", optional.NewNoneOption[secrets.Component](), SystemProbe().GetEnvVars()) -} - -// GetProcessAPIAddressPort Alias using Datadog config -func GetProcessAPIAddressPort() (string, error) { - 
return pkgconfigsetup.GetProcessAPIAddressPort(Datadog()) -} diff --git a/pkg/config/aliases_darwin.go b/pkg/config/aliases_darwin.go deleted file mode 100644 index 17dc1cb3a0f76..0000000000000 --- a/pkg/config/aliases_darwin.go +++ /dev/null @@ -1,18 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package config - -import ( - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" -) - -// Aliases to setup package -const ( - DefaultUpdaterLogFile = pkgconfigsetup.DefaultUpdaterLogFile - DefaultSecurityAgentLogFile = pkgconfigsetup.DefaultSecurityAgentLogFile - DefaultProcessAgentLogFile = pkgconfigsetup.DefaultProcessAgentLogFile - DefaultDDAgentBin = pkgconfigsetup.DefaultDDAgentBin -) diff --git a/pkg/config/aliases_nix.go b/pkg/config/aliases_nix.go deleted file mode 100644 index 4bbce899be6bd..0000000000000 --- a/pkg/config/aliases_nix.go +++ /dev/null @@ -1,20 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build linux || freebsd || netbsd || openbsd || solaris || dragonfly || aix - -package config - -import ( - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" -) - -// Aliases to setup package -var ( - DefaultUpdaterLogFile = pkgconfigsetup.DefaultUpdaterLogFile - DefaultSecurityAgentLogFile = pkgconfigsetup.DefaultSecurityAgentLogFile - DefaultProcessAgentLogFile = pkgconfigsetup.DefaultProcessAgentLogFile - DefaultDDAgentBin = pkgconfigsetup.DefaultDDAgentBin -) diff --git a/pkg/config/aliases_windows.go b/pkg/config/aliases_windows.go deleted file mode 100644 index cab0f23bfedbd..0000000000000 --- a/pkg/config/aliases_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
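The three per-OS alias files being removed follow the standard Go convention for platform-specific defaults: one file per platform, selected at compile time by a filename suffix or a go:build constraint. A hypothetical reduced example of that convention (package name, constant, and path are illustrative only):

//go:build linux

// Package osdefaults sketches the per-platform file convention used by the
// deleted alias files: each OS gets its own file guarded by a build
// constraint (or a _windows/_darwin filename suffix) and defines the same
// names with platform-appropriate values.
package osdefaults

const DefaultAgentLogFile = "/var/log/example/agent.log"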
- -package config - -import ( - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" -) - -// Aliases to setup package -var ( - DefaultUpdaterLogFile = pkgconfigsetup.DefaultUpdaterLogFile - DefaultSecurityAgentLogFile = pkgconfigsetup.DefaultSecurityAgentLogFile - DefaultProcessAgentLogFile = pkgconfigsetup.DefaultProcessAgentLogFile - DefaultDDAgentBin = pkgconfigsetup.DefaultDDAgentBin -) diff --git a/pkg/config/autodiscovery/autodiscovery.go b/pkg/config/autodiscovery/autodiscovery.go index ccd4ae2930eb4..1f1455c39761e 100644 --- a/pkg/config/autodiscovery/autodiscovery.go +++ b/pkg/config/autodiscovery/autodiscovery.go @@ -11,32 +11,32 @@ package autodiscovery import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" snmplistener "github.com/DataDog/datadog-agent/pkg/snmp" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/log" ) // DiscoverComponentsFromConfig returns a list of AD Providers and Listeners based on the agent configuration -func DiscoverComponentsFromConfig() ([]config.ConfigurationProviders, []config.Listeners) { - detectedProviders := []config.ConfigurationProviders{} - detectedListeners := []config.Listeners{} +func DiscoverComponentsFromConfig() ([]pkgconfigsetup.ConfigurationProviders, []pkgconfigsetup.Listeners) { + detectedProviders := []pkgconfigsetup.ConfigurationProviders{} + detectedListeners := []pkgconfigsetup.Listeners{} // Auto-add Prometheus config provider based on `prometheus_scrape.enabled` - if config.Datadog().GetBool("prometheus_scrape.enabled") { - var prometheusProvider config.ConfigurationProviders + if pkgconfigsetup.Datadog().GetBool("prometheus_scrape.enabled") { + var prometheusProvider pkgconfigsetup.ConfigurationProviders if flavor.GetFlavor() == flavor.ClusterAgent { - prometheusProvider = config.ConfigurationProviders{Name: "prometheus_services", Polling: true} + prometheusProvider = pkgconfigsetup.ConfigurationProviders{Name: "prometheus_services", Polling: true} } else { - prometheusProvider = config.ConfigurationProviders{Name: "prometheus_pods", Polling: true} + prometheusProvider = pkgconfigsetup.ConfigurationProviders{Name: "prometheus_pods", Polling: true} } log.Infof("Prometheus scraping is enabled: Adding the Prometheus config provider '%s'", prometheusProvider.Name) detectedProviders = append(detectedProviders, prometheusProvider) } // Add database-monitoring aurora listener if the feature is enabled - if config.Datadog().GetBool("database_monitoring.autodiscovery.aurora.enabled") { - detectedListeners = append(detectedListeners, config.Listeners{Name: "database-monitoring-aurora"}) + if pkgconfigsetup.Datadog().GetBool("database_monitoring.autodiscovery.aurora.enabled") { + detectedListeners = append(detectedListeners, pkgconfigsetup.Listeners{Name: "database-monitoring-aurora"}) log.Info("Database monitoring aurora discovery is enabled: Adding the aurora listener") } @@ -55,7 +55,7 @@ func DiscoverComponentsFromConfig() ([]config.ConfigurationProviders, []config.L log.Info("Configs with advanced kube service identifiers detected: Adding the 'kube service file' config provider") // Polling is set to false because kube_services_file is a static config provider. 
// It generates entity IDs based on the provided advanced config: kube_service:/// - detectedProviders = append(detectedProviders, config.ConfigurationProviders{Name: names.KubeServicesFileRegisterName, Polling: false}) + detectedProviders = append(detectedProviders, pkgconfigsetup.ConfigurationProviders{Name: names.KubeServicesFileRegisterName, Polling: false}) } if !epFound && !adv.KubeEndpoints.IsEmpty() { @@ -64,7 +64,7 @@ func DiscoverComponentsFromConfig() ([]config.ConfigurationProviders, []config.L // Polling is set to true because kube_endpoints_file is a dynamic config provider. // It generates entity IDs based on the provided advanced config + the IPs found in the corresponding Endpoints object: kube_endpoint://// // The generated entity IDs are subject to change, thus the continuous polling. - detectedProviders = append(detectedProviders, config.ConfigurationProviders{Name: names.KubeEndpointsFileRegisterName, Polling: true}) + detectedProviders = append(detectedProviders, pkgconfigsetup.ConfigurationProviders{Name: names.KubeEndpointsFileRegisterName, Polling: true}) } } @@ -76,10 +76,10 @@ func DiscoverComponentsFromConfig() ([]config.ConfigurationProviders, []config.L // Auto-activate autodiscovery without listeners: - snmp configs := []snmplistener.Config{} - err := config.Datadog().UnmarshalKey("network_devices.autodiscovery.configs", &configs) + err := pkgconfigsetup.Datadog().UnmarshalKey("network_devices.autodiscovery.configs", &configs) if err == nil && len(configs) > 0 { - detectedListeners = append(detectedListeners, config.Listeners{Name: "snmp"}) + detectedListeners = append(detectedListeners, pkgconfigsetup.Listeners{Name: "snmp"}) log.Info("Configs for autodiscovery detected: Adding the snmp listener") } @@ -87,14 +87,14 @@ func DiscoverComponentsFromConfig() ([]config.ConfigurationProviders, []config.L } // DiscoverComponentsFromEnv returns a list of AD Providers and Listeners based on environment characteristics -func DiscoverComponentsFromEnv() ([]config.ConfigurationProviders, []config.Listeners) { - detectedProviders := []config.ConfigurationProviders{} - detectedListeners := []config.Listeners{} +func DiscoverComponentsFromEnv() ([]pkgconfigsetup.ConfigurationProviders, []pkgconfigsetup.Listeners) { + detectedProviders := []pkgconfigsetup.ConfigurationProviders{} + detectedListeners := []pkgconfigsetup.Listeners{} // When using automatic discovery of providers/listeners // We automatically activate the environment and static config listener - detectedListeners = append(detectedListeners, config.Listeners{Name: "environment"}) - detectedListeners = append(detectedListeners, config.Listeners{Name: "static config"}) + detectedListeners = append(detectedListeners, pkgconfigsetup.Listeners{Name: "environment"}) + detectedListeners = append(detectedListeners, pkgconfigsetup.Listeners{Name: "static config"}) // Automatic handling of AD providers/listeners should only run in the core or process agent. 
if flavor.GetFlavor() != flavor.DefaultAgent && flavor.GetFlavor() != flavor.ProcessAgent { @@ -108,17 +108,17 @@ func DiscoverComponentsFromEnv() ([]config.ConfigurationProviders, []config.List isKubeEnv := env.IsFeaturePresent(env.Kubernetes) if isContainerEnv || isKubeEnv { - detectedProviders = append(detectedProviders, config.ConfigurationProviders{Name: names.KubeContainer}) + detectedProviders = append(detectedProviders, pkgconfigsetup.ConfigurationProviders{Name: names.KubeContainer}) log.Info("Adding KubeContainer provider from environment") } if isContainerEnv && !isKubeEnv { - detectedListeners = append(detectedListeners, config.Listeners{Name: names.Container}) + detectedListeners = append(detectedListeners, pkgconfigsetup.Listeners{Name: names.Container}) log.Info("Adding Container listener from environment") } if isKubeEnv { - detectedListeners = append(detectedListeners, config.Listeners{Name: "kubelet"}) + detectedListeners = append(detectedListeners, pkgconfigsetup.Listeners{Name: "kubelet"}) log.Info("Adding Kubelet listener from environment") } diff --git a/pkg/config/autodiscovery/autodiscovery_test.go b/pkg/config/autodiscovery/autodiscovery_test.go index 45499431766f9..09a7f28ae1a04 100644 --- a/pkg/config/autodiscovery/autodiscovery_test.go +++ b/pkg/config/autodiscovery/autodiscovery_test.go @@ -9,14 +9,15 @@ import ( "strings" "testing" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/stretchr/testify/assert" + + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestDiscoverComponentsFromConfigForSnmp(t *testing.T) { - config.Datadog().SetConfigType("yaml") + pkgconfigsetup.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(` + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: configs: @@ -27,7 +28,7 @@ network_devices: assert.Len(t, configListeners, 1) assert.Equal(t, "snmp", configListeners[0].Name) - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: configs: @@ -36,7 +37,7 @@ network_devices: _, configListeners = DiscoverComponentsFromConfig() assert.Empty(t, len(configListeners)) - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` snmp_listener: configs: - network: 127.0.0.1/30 diff --git a/pkg/config/config_template.yaml b/pkg/config/config_template.yaml index e01a2e747831b..10d7ac848eb3f 100644 --- a/pkg/config/config_template.yaml +++ b/pkg/config/config_template.yaml @@ -885,6 +885,8 @@ api_key: # enabled: false {{ end }} + + {{ end -}} {{- if .LogsAgent }} @@ -1013,12 +1015,18 @@ api_key: # # max_message_size_bytes: 256000 - ## @param integrations_logs_files_max_size - integer - optional - default: 100 - ## @env DD_LOGS_CONFIG_INTEGRATIONS_LOGS_FILES_MAX_SIZE - integer - optional - default: 100 - ## The combined size in MB of all the integration logs files the Agent is allowed to write. 
+ ## @param integrations_logs_files_max_size - integer - optional - default: 10 + ## @env DD_LOGS_CONFIG_INTEGRATIONS_LOGS_FILES_MAX_SIZE - integer - optional - default: 10 + ## The max size in MB that an integration logs file is allowed to use # # integrations_logs_files_max_size + ## @param integrations_logs_total_usage - integer - optional - default: 100 + ## @env DD_LOGS_CONFIG_INTEGRATIONS_LOGS_TOTAL_USAGE - integer - optional - default: 100 + ## The total combined usage all integrations logs files can use + # + # integrations_logs_total_usage + {{ end -}} {{- if .TraceAgent }} @@ -1361,7 +1369,7 @@ api_key: ## The list of items available under apm_config.features is not guaranteed to persist across versions; ## a feature may eventually be promoted to its own configuration option on the agent, or dropped entirely. # - # features: ["error_rare_sample_tracer_drop","table_names","component2name","sql_cache","enable_otlp_compute_top_level_by_span_kind"] + # features: ["error_rare_sample_tracer_drop","table_names","component2name","sql_cache","sqllexer","enable_otlp_compute_top_level_by_span_kind"] ## @param additional_endpoints - object - optional ## @env DD_APM_ADDITIONAL_ENDPOINTS - object - optional @@ -1483,6 +1491,15 @@ api_key: # # process_config: + {{- if (eq .OS "linux")}} + ## @param run_in_core_agent - custom object - optional + ## Controls whether the process Agent or core Agent collects process and/or container information (Linux only). + # run_in_core_agent: + ## @param enabled - boolean - optional - default: false + ## Enables process/container collection on the core Agent instead of the process Agent. + # enabled: false + {{ end }} + ## @param process_collection - custom object - optional ## Specifies settings for collecting processes. # process_collection: @@ -1708,6 +1725,28 @@ api_key: ## # check_max_events_per_run: 100 {{ end -}} + +{{- if .SBOM }} +## @param sbom - custom object - optional +## Enter specific configuration for the Cloud Security Management Vulnerability Management feature +# sbom: + ## @param enabled - boolean - optional - default: false + ## set to true to enable Cloud Security Management Vulnerability Management + # enabled: false + + ## uncomment the sections below to enable where the vulnerability scanning is done + + ## @param enabled - boolean - optional - default: false + ## set to true to enable Infrastructure Vulnerabiltilies + # host: + # enabled: false +{{- if (eq .OS "linux")}} + + + # container_image: + # enabled: false +{{ end -}} +{{ end -}} {{- if .SystemProbe }} ################################## @@ -2987,6 +3026,15 @@ api_key: # # trace_agent_socket: unix:///var/run/datadog/apm.socket + ## @param type_socket_volumes - boolean - optional - default: false + ## @env DD_ADMISSION_CONTROLLER_INJECT_CONFIG_TYPE_SOCKET_VOLUMES - boolean - optional - default: false + ## When enabled, injected volumes are of type "Socket". This means that + ## injected pods will not start until the Agent creates the dogstatsd and + ## trace-agent sockets. This ensures no lost traces or dogstatsd metrics but + ## can cause the pod to wait if the agent has issues creating the sockets. + # + # type_socket_volumes: false + ## @param inject_tags - custom object - optional ## Tags injection parameters. # @@ -4386,22 +4434,13 @@ api_key: ## Debug-specific configuration for OTLP ingest in the Datadog Agent. 
## This template lists the most commonly used settings; see the OpenTelemetry Collector documentation ## for a full list of available settings: - ## https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/loggingexporter#getting-started + ## https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/debugexporter#getting-started # # debug: - - ## Deprecated (v[6/7].41.0) - use `verbosity` instead - ## @param loglevel - string - optional - default: none - ## @env DD_OTLP_CONFIG_DEBUG_LOGLEVEL - string - optional - default: none - ## Verbosity of debug logs when Datadog Agent receives otlp traces/metrics. - ## Valid values are disabled, debug, info, error, warn. - # - # loglevel: info - ## @param verbosity - string - optional - default: normal ## @env DD_OTLP_CONFIG_DEBUG_VERBOSITY - string - optional - default: normal ## Verbosity of debug logs when Datadog Agent receives otlp traces/metrics. - ## Valid values are basic, normal, detailed. + ## Valid values are basic, normal, detailed, none. # # verbosity: normal {{- if (eq .OS "windows")}} diff --git a/pkg/config/fetcher/from_processes.go b/pkg/config/fetcher/from_processes.go index 02408be61b519..5b8088f41d970 100644 --- a/pkg/config/fetcher/from_processes.go +++ b/pkg/config/fetcher/from_processes.go @@ -10,7 +10,6 @@ import ( "fmt" "time" - "github.com/DataDog/datadog-agent/cmd/system-probe/api/client" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/pkg/api/util" settingshttp "github.com/DataDog/datadog-agent/pkg/config/settings/http" @@ -107,19 +106,3 @@ func ProcessAgentConfig(config config.Reader, getEntireConfig bool) (string, err return client.FullConfig() } - -// SystemProbeConfig fetch the configuration from the system-probe process by querying its API -func SystemProbeConfig(config config.Reader) (string, error) { - hc := client.Get(config.GetString("system_probe_config.sysprobe_socket")) - - c := settingshttp.NewClient(hc, "http://localhost/config", "system-probe", settingshttp.NewHTTPClientOptions(util.CloseConnection)) - return c.FullConfig() -} - -// SystemProbeConfigBySource fetch the all configuration layers from the system-probe process by querying its API -func SystemProbeConfigBySource(config config.Reader) (string, error) { - hc := client.Get(config.GetString("system_probe_config.sysprobe_socket")) - - c := settingshttp.NewClient(hc, "http://localhost/config", "system-probe", settingshttp.NewHTTPClientOptions(util.CloseConnection)) - return c.FullConfigBySource() -} diff --git a/pkg/config/fetcher/sysprobe/from_sysprobe.go b/pkg/config/fetcher/sysprobe/from_sysprobe.go new file mode 100644 index 0000000000000..c1218799fc072 --- /dev/null +++ b/pkg/config/fetcher/sysprobe/from_sysprobe.go @@ -0,0 +1,32 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package sysprobe is a collection of high level helpers to pull the configuration from system-probe. +// It is separated from the other helpers in the parent package to avoid unnecessary imports in processes +// that have no need to directly communicate with system-probe. 
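For callers the split mostly changes an import path; a hedged usage sketch of the helper defined just below, assuming the caller already holds a comp/core/config Reader with system_probe_config.sysprobe_socket set (the wrapper function and import aliases are invented for illustration):

package fetcherexample

import (
    compconfig "github.com/DataDog/datadog-agent/comp/core/config"
    sysprobefetcher "github.com/DataDog/datadog-agent/pkg/config/fetcher/sysprobe"
)

// dumpSystemProbeConfig returns the system-probe configuration as rendered by
// its API; cfg must expose system_probe_config.sysprobe_socket so the client
// can reach the system-probe process.
func dumpSystemProbeConfig(cfg compconfig.Reader) (string, error) {
    return sysprobefetcher.SystemProbeConfig(cfg)
}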
+package sysprobe + +import ( + "github.com/DataDog/datadog-agent/cmd/system-probe/api/client" + "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/pkg/api/util" + settingshttp "github.com/DataDog/datadog-agent/pkg/config/settings/http" +) + +// SystemProbeConfig fetch the configuration from the system-probe process by querying its API +func SystemProbeConfig(config config.Reader) (string, error) { + hc := client.Get(config.GetString("system_probe_config.sysprobe_socket")) + + c := settingshttp.NewClient(hc, "http://localhost/config", "system-probe", settingshttp.NewHTTPClientOptions(util.CloseConnection)) + return c.FullConfig() +} + +// SystemProbeConfigBySource fetch the all configuration layers from the system-probe process by querying its API +func SystemProbeConfigBySource(config config.Reader) (string, error) { + hc := client.Get(config.GetString("system_probe_config.sysprobe_socket")) + + c := settingshttp.NewClient(hc, "http://localhost/config", "system-probe", settingshttp.NewHTTPClientOptions(util.CloseConnection)) + return c.FullConfigBySource() +} diff --git a/pkg/config/legacy/converter.go b/pkg/config/legacy/converter.go index be8fce6d99443..f64ca2eb1f19f 100644 --- a/pkg/config/legacy/converter.go +++ b/pkg/config/legacy/converter.go @@ -13,13 +13,30 @@ import ( "strconv" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" ) +// ConfigConverter is used in the legacy package +// to convert A5 config to A6 +type ConfigConverter struct { + model.Config +} + +// Set is used for setting configuration from A5 config +func (c *ConfigConverter) Set(key string, value interface{}) { + c.Config.Set(key, value, model.SourceAgentRuntime) +} + +// NewConfigConverter is creating and returning a config converter +func NewConfigConverter() *ConfigConverter { + return &ConfigConverter{pkgconfigsetup.Datadog()} +} + // FromAgentConfig reads the old agentConfig configuration, converts and merges // the values into the current configuration object -func FromAgentConfig(agentConfig Config, converter *config.LegacyConfigConverter) error { +func FromAgentConfig(agentConfig Config, converter *ConfigConverter) error { if err := extractURLAPIKeys(agentConfig, converter); err != nil { return err } @@ -84,8 +101,8 @@ func FromAgentConfig(agentConfig Config, converter *config.LegacyConfigConverter if agentConfig["service_discovery_backend"] == "docker" { // `docker` is the only possible value also on the Agent v5 - dockerListener := config.Listeners{Name: "docker"} - converter.Set("listeners", []config.Listeners{dockerListener}) + dockerListener := pkgconfigsetup.Listeners{Name: "docker"} + converter.Set("listeners", []pkgconfigsetup.Listeners{dockerListener}) } if providers, err := buildConfigProviders(agentConfig); err == nil { @@ -160,7 +177,7 @@ func FromAgentConfig(agentConfig Config, converter *config.LegacyConfigConverter return extractTraceAgentConfig(agentConfig, converter) } -func extractTraceAgentConfig(agentConfig Config, converter *config.LegacyConfigConverter) error { +func extractTraceAgentConfig(agentConfig Config, converter *ConfigConverter) error { for iniKey, yamlKey := range map[string]string{ "trace.api.api_key": "apm_config.api_key", "trace.api.endpoint": "apm_config.apm_dd_url", @@ -233,7 +250,7 @@ func isAffirmative(value string) (bool, error) { return v 
== "true" || v == "yes" || v == "1", nil } -func extractURLAPIKeys(agentConfig Config, converter *config.LegacyConfigConverter) error { +func extractURLAPIKeys(agentConfig Config, converter *ConfigConverter) error { urls := strings.Split(agentConfig["dd_url"], ",") keys := strings.Split(agentConfig["api_key"], ",") @@ -323,7 +340,7 @@ func buildSyslogURI(agentConfig Config) string { return host } -func buildConfigProviders(agentConfig Config) ([]config.ConfigurationProviders, error) { +func buildConfigProviders(agentConfig Config) ([]pkgconfigsetup.ConfigurationProviders, error) { // the list of SD_CONFIG_BACKENDS supported in v5 SdConfigBackends := map[string]struct{}{ "etcd": {}, @@ -340,7 +357,7 @@ func buildConfigProviders(agentConfig Config) ([]config.ConfigurationProviders, url = url + ":" + agentConfig["sd_backend_port"] } - cp := config.ConfigurationProviders{ + cp := pkgconfigsetup.ConfigurationProviders{ Username: agentConfig["sd_backend_username"], Password: agentConfig["sd_backend_password"], TemplateURL: url, @@ -358,7 +375,7 @@ func buildConfigProviders(agentConfig Config) ([]config.ConfigurationProviders, cp.Name = "zookeeper" // name is different in v6 } - return []config.ConfigurationProviders{cp}, nil + return []pkgconfigsetup.ConfigurationProviders{cp}, nil } func buildHistogramAggregates(agentConfig Config) []string { diff --git a/pkg/config/legacy/converter_test.go b/pkg/config/legacy/converter_test.go index c1eb5ab27ec14..30a02255a8a30 100644 --- a/pkg/config/legacy/converter_test.go +++ b/pkg/config/legacy/converter_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestIsAffirmative(t *testing.T) { @@ -200,15 +200,15 @@ func TestBuildHistogramPercentiles(t *testing.T) { } func TestDefaultValues(t *testing.T) { - configConverter := config.NewConfigConverter() + configConverter := NewConfigConverter() agentConfig := make(Config) FromAgentConfig(agentConfig, configConverter) - assert.Equal(t, true, config.Datadog().GetBool("hostname_fqdn")) + assert.Equal(t, true, pkgconfigsetup.Datadog().GetBool("hostname_fqdn")) } func TestTraceIgnoreResources(t *testing.T) { require := require.New(t) - configConverter := config.NewConfigConverter() + configConverter := NewConfigConverter() cases := []struct { config string @@ -225,19 +225,19 @@ func TestTraceIgnoreResources(t *testing.T) { cfg["trace.ignore.resource"] = c.config err := FromAgentConfig(cfg, configConverter) require.NoError(err) - require.Equal(c.expected, config.Datadog().GetStringSlice("apm_config.ignore_resources")) + require.Equal(c.expected, pkgconfigsetup.Datadog().GetStringSlice("apm_config.ignore_resources")) } } func TestConverter(t *testing.T) { require := require.New(t) - configConverter := config.NewConfigConverter() + configConverter := NewConfigConverter() cfg, err := GetAgentConfig("./tests/datadog.conf") require.NoError(err) err = FromAgentConfig(cfg, configConverter) require.NoError(err) - c := config.Datadog() + c := pkgconfigsetup.Datadog() require.Equal([]string{ "GET|POST /healthcheck", "GET /V1", @@ -317,7 +317,7 @@ func TestConverter(t *testing.T) { } func TestExtractURLAPIKeys(t *testing.T) { - configConverter := config.NewConfigConverter() + configConverter := NewConfigConverter() defer func() { configConverter.Set("dd_url", "") configConverter.Set("api_key", "") @@ -330,28 +330,28 @@ func TestExtractURLAPIKeys(t 
*testing.T) { agentConfig["api_key"] = "" err := extractURLAPIKeys(agentConfig, configConverter) assert.NoError(t, err) - assert.Equal(t, "", config.Datadog().GetString("dd_url")) - assert.Equal(t, "", config.Datadog().GetString("api_key")) - assert.Empty(t, config.Datadog().GetStringMapStringSlice("additional_endpoints")) + assert.Equal(t, "", pkgconfigsetup.Datadog().GetString("dd_url")) + assert.Equal(t, "", pkgconfigsetup.Datadog().GetString("api_key")) + assert.Empty(t, pkgconfigsetup.Datadog().GetStringMapStringSlice("additional_endpoints")) // one url and one key agentConfig["dd_url"] = "https://datadoghq.com" agentConfig["api_key"] = "123456789" err = extractURLAPIKeys(agentConfig, configConverter) assert.NoError(t, err) - assert.Equal(t, "https://datadoghq.com", config.Datadog().GetString("dd_url")) - assert.Equal(t, "123456789", config.Datadog().GetString("api_key")) - assert.Empty(t, config.Datadog().GetStringMapStringSlice("additional_endpoints")) + assert.Equal(t, "https://datadoghq.com", pkgconfigsetup.Datadog().GetString("dd_url")) + assert.Equal(t, "123456789", pkgconfigsetup.Datadog().GetString("api_key")) + assert.Empty(t, pkgconfigsetup.Datadog().GetStringMapStringSlice("additional_endpoints")) // multiple dd_url and api_key agentConfig["dd_url"] = "https://datadoghq.com,https://datadoghq.com,https://datadoghq.com,https://staging.com" agentConfig["api_key"] = "123456789,abcdef,secret_key,secret_key2" err = extractURLAPIKeys(agentConfig, configConverter) assert.NoError(t, err) - assert.Equal(t, "https://datadoghq.com", config.Datadog().GetString("dd_url")) - assert.Equal(t, "123456789", config.Datadog().GetString("api_key")) + assert.Equal(t, "https://datadoghq.com", pkgconfigsetup.Datadog().GetString("dd_url")) + assert.Equal(t, "123456789", pkgconfigsetup.Datadog().GetString("api_key")) - endpoints := config.Datadog().GetStringMapStringSlice("additional_endpoints") + endpoints := pkgconfigsetup.Datadog().GetStringMapStringSlice("additional_endpoints") assert.Equal(t, 2, len(endpoints)) assert.Equal(t, []string{"abcdef", "secret_key"}, endpoints["https://datadoghq.com"]) assert.Equal(t, []string{"secret_key2"}, endpoints["https://staging.com"]) diff --git a/pkg/config/legacy/docker.go b/pkg/config/legacy/docker.go index 567bd07966f6c..3c8b7ab49f49d 100644 --- a/pkg/config/legacy/docker.go +++ b/pkg/config/legacy/docker.go @@ -15,7 +15,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/containers/docker" - "github.com/DataDog/datadog-agent/pkg/config" yaml "gopkg.in/yaml.v2" ) @@ -64,7 +63,7 @@ type legacyDockerInstance struct { // ImportDockerConf read the configuration from docker_daemon check (agent5) // and create the configuration for the new docker check (agent 6) and move // needed option to datadog.yaml -func ImportDockerConf(src, dst string, overwrite bool, converter *config.LegacyConfigConverter) error { +func ImportDockerConf(src, dst string, overwrite bool, converter *ConfigConverter) error { fmt.Printf("%s\n", warningNewCheck) // read docker_daemon.yaml diff --git a/pkg/config/legacy/docker_test.go b/pkg/config/legacy/docker_test.go index 161f14c308fab..0163b4a5ed9d5 100644 --- a/pkg/config/legacy/docker_test.go +++ b/pkg/config/legacy/docker_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) 
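The relocated converter is used the same way in the docker and kubernetes import paths as in the tests around it: build a converter over the live configuration, then feed it the parsed v5 values. A condensed sketch of that flow (the wrapper function and the v5 config path are illustrative, not part of this patch):

package importexample

import "github.com/DataDog/datadog-agent/pkg/config/legacy"

// importV5Config mirrors the calls exercised in the tests: parse the Agent 5
// ini-style file, then merge it into the live configuration through the
// converter, which records values with the agent-runtime source.
func importV5Config() error {
    converter := legacy.NewConfigConverter()
    agentConfig, err := legacy.GetAgentConfig("/etc/dd-agent/datadog.conf")
    if err != nil {
        return err
    }
    return legacy.FromAgentConfig(agentConfig, converter)
}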
const ( @@ -89,7 +89,7 @@ func TestConvertDocker(t *testing.T) { err := os.WriteFile(src, []byte(dockerDaemonLegacyConf), 0640) require.NoError(t, err) - configConverter := config.NewConfigConverter() + configConverter := NewConfigConverter() err = ImportDockerConf(src, dst, true, configConverter) require.NoError(t, err) @@ -98,15 +98,15 @@ func TestConvertDocker(t *testing.T) { assert.Equal(t, dockerNewConf, string(newConf)) - assert.Equal(t, true, config.Datadog().GetBool("exclude_pause_container")) + assert.Equal(t, true, pkgconfigsetup.Datadog().GetBool("exclude_pause_container")) assert.Equal(t, []string{"name:test", "name:some_image.*", "image:some_image_2", "image:some_image_3"}, - config.Datadog().GetStringSlice("ac_exclude")) - assert.Equal(t, []string{"image:some_image_3"}, config.Datadog().GetStringSlice("ac_include")) + pkgconfigsetup.Datadog().GetStringSlice("ac_exclude")) + assert.Equal(t, []string{"image:some_image_3"}, pkgconfigsetup.Datadog().GetStringSlice("ac_include")) - assert.Equal(t, "/host/test/proc", config.Datadog().GetString("container_proc_root")) - assert.Equal(t, "/host/test/sys/fs/cgroup", config.Datadog().GetString("container_cgroup_root")) + assert.Equal(t, "/host/test/proc", pkgconfigsetup.Datadog().GetString("container_proc_root")) + assert.Equal(t, "/host/test/sys/fs/cgroup", pkgconfigsetup.Datadog().GetString("container_cgroup_root")) assert.Equal(t, map[string]string{"test1": "test1", "test2": "test2"}, - config.Datadog().GetStringMapString("docker_labels_as_tags")) + pkgconfigsetup.Datadog().GetStringMapString("docker_labels_as_tags")) // test overwrite err = ImportDockerConf(src, dst, false, configConverter) diff --git a/pkg/config/legacy/kubernetes.go b/pkg/config/legacy/kubernetes.go index fdda56b95a00b..92be3537ac353 100644 --- a/pkg/config/legacy/kubernetes.go +++ b/pkg/config/legacy/kubernetes.go @@ -13,7 +13,6 @@ import ( "strings" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers" - "github.com/DataDog/datadog-agent/pkg/config" yaml "gopkg.in/yaml.v2" ) @@ -83,13 +82,13 @@ func (k kubeDeprecations) print() { // ImportKubernetesConf reads the configuration from the kubernetes check (agent5) // and create the configuration for the new kubelet check (agent 6) and moves // relevant options to datadog.yaml -func ImportKubernetesConf(src, dst string, overwrite bool, converter *config.LegacyConfigConverter) error { +func ImportKubernetesConf(src, dst string, overwrite bool, converter *ConfigConverter) error { _, err := importKubernetesConfWithDeprec(src, dst, overwrite, converter) return err } // Deprecated options are listed in the kubeDeprecations return value, for testing -func importKubernetesConfWithDeprec(src, dst string, overwrite bool, converter *config.LegacyConfigConverter) (kubeDeprecations, error) { +func importKubernetesConfWithDeprec(src, dst string, overwrite bool, converter *ConfigConverter) (kubeDeprecations, error) { fmt.Printf("%s\n", warningNewKubeCheck) deprecations := make(kubeDeprecations) diff --git a/pkg/config/legacy/kubernetes_test.go b/pkg/config/legacy/kubernetes_test.go index 6facf05d09564..0e8495f097fc8 100644 --- a/pkg/config/legacy/kubernetes_test.go +++ b/pkg/config/legacy/kubernetes_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) const ( @@ -118,7 +118,7 @@ func TestConvertKubernetes(t *testing.T) { err = 
os.WriteFile(srcEmpty, []byte(kubernetesLegacyEmptyConf), 0640) require.NoError(t, err) - configConverter := config.NewConfigConverter() + configConverter := NewConfigConverter() deprecations, err := importKubernetesConfWithDeprec(src, dst, true, configConverter) require.NoError(t, err) require.EqualValues(t, expectedKubeDeprecations, deprecations) @@ -127,26 +127,26 @@ func TestConvertKubernetes(t *testing.T) { require.NoError(t, err) assert.Equal(t, kubeletNewConf, string(newConf)) - assert.Equal(t, 1234, config.Datadog().GetInt("kubernetes_http_kubelet_port")) - assert.Equal(t, 1234, config.Datadog().GetInt("kubernetes_https_kubelet_port")) - assert.Equal(t, "localhost", config.Datadog().GetString("kubernetes_kubelet_host")) - assert.Equal(t, "/path/to/client.crt", config.Datadog().GetString("kubelet_client_crt")) - assert.Equal(t, "/path/to/client.key", config.Datadog().GetString("kubelet_client_key")) - assert.Equal(t, "/path/to/ca.pem", config.Datadog().GetString("kubelet_client_ca")) - assert.Equal(t, "/path/to/token", config.Datadog().GetString("kubelet_auth_token_path")) - assert.EqualValues(t, expectedHostTags, config.Datadog().GetStringMapString("kubernetes_node_labels_as_tags")) - assert.Equal(t, false, config.Datadog().GetBool("kubelet_tls_verify")) - - assert.Equal(t, true, config.Datadog().GetBool("kubernetes_collect_service_tags")) - assert.Equal(t, true, config.Datadog().GetBool("collect_kubernetes_events")) - assert.Equal(t, true, config.Datadog().GetBool("leader_election")) - assert.Equal(t, 1200, config.Datadog().GetInt("leader_lease_duration")) - assert.Equal(t, 3000, config.Datadog().GetInt("kubernetes_service_tag_update_freq")) + assert.Equal(t, 1234, pkgconfigsetup.Datadog().GetInt("kubernetes_http_kubelet_port")) + assert.Equal(t, 1234, pkgconfigsetup.Datadog().GetInt("kubernetes_https_kubelet_port")) + assert.Equal(t, "localhost", pkgconfigsetup.Datadog().GetString("kubernetes_kubelet_host")) + assert.Equal(t, "/path/to/client.crt", pkgconfigsetup.Datadog().GetString("kubelet_client_crt")) + assert.Equal(t, "/path/to/client.key", pkgconfigsetup.Datadog().GetString("kubelet_client_key")) + assert.Equal(t, "/path/to/ca.pem", pkgconfigsetup.Datadog().GetString("kubelet_client_ca")) + assert.Equal(t, "/path/to/token", pkgconfigsetup.Datadog().GetString("kubelet_auth_token_path")) + assert.EqualValues(t, expectedHostTags, pkgconfigsetup.Datadog().GetStringMapString("kubernetes_node_labels_as_tags")) + assert.Equal(t, false, pkgconfigsetup.Datadog().GetBool("kubelet_tls_verify")) + + assert.Equal(t, true, pkgconfigsetup.Datadog().GetBool("kubernetes_collect_service_tags")) + assert.Equal(t, true, pkgconfigsetup.Datadog().GetBool("collect_kubernetes_events")) + assert.Equal(t, true, pkgconfigsetup.Datadog().GetBool("leader_election")) + assert.Equal(t, 1200, pkgconfigsetup.Datadog().GetInt("leader_lease_duration")) + assert.Equal(t, 3000, pkgconfigsetup.Datadog().GetInt("kubernetes_service_tag_update_freq")) configConverter.Set("kubelet_tls_verify", true) deprecations, err = importKubernetesConfWithDeprec(srcEmpty, dstEmpty, true, configConverter) require.NoError(t, err) - assert.Equal(t, true, config.Datadog().GetBool("kubelet_tls_verify")) + assert.Equal(t, true, pkgconfigsetup.Datadog().GetBool("kubelet_tls_verify")) assert.Equal(t, 0, len(deprecations)) newEmptyConf, err := os.ReadFile(dstEmpty) require.NoError(t, err) diff --git a/pkg/config/legacy/no_docker.go b/pkg/config/legacy/no_docker.go index 3932b6e5cc658..6941bdac3bfbf 100644 --- 
a/pkg/config/legacy/no_docker.go +++ b/pkg/config/legacy/no_docker.go @@ -9,12 +9,10 @@ package legacy import ( "fmt" - - "github.com/DataDog/datadog-agent/pkg/config" ) // ImportDockerConf is a place holder if the agent is built without the docker flag -func ImportDockerConf(_, _ string, _ bool, _ *config.LegacyConfigConverter) error { +func ImportDockerConf(_, _ string, _ bool, _ *ConfigConverter) error { fmt.Println("This agent was build without docker support: could not convert docker_daemon.yaml") return nil } diff --git a/pkg/config/legacy_converter.go b/pkg/config/legacy_converter.go deleted file mode 100644 index f0b173070893a..0000000000000 --- a/pkg/config/legacy_converter.go +++ /dev/null @@ -1,27 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package config - -import ( - "github.com/DataDog/datadog-agent/pkg/config/model" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" -) - -// LegacyConfigConverter is used in the legacy package -// to convert A5 config to A6 -type LegacyConfigConverter struct { - Config -} - -// Set is used for setting configuration from A5 config -func (c *LegacyConfigConverter) Set(key string, value interface{}) { - c.Config.Set(key, value, model.SourceAgentRuntime) -} - -// NewConfigConverter is creating and returning a config converter -func NewConfigConverter() *LegacyConfigConverter { - return &LegacyConfigConverter{pkgconfigsetup.Datadog()} -} diff --git a/pkg/config/mock/go.mod b/pkg/config/mock/go.mod index 88d2f94e90167..26aa6790a3243 100644 --- a/pkg/config/mock/go.mod +++ b/pkg/config/mock/go.mod @@ -2,49 +2,32 @@ module github.com/DataDog/datadog-agent/pkg/config/mock go 1.22.0 -replace github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../pkg/util/scrubber - -replace github.com/DataDog/datadog-agent/pkg/util/executable => ../../../pkg/util/executable - -replace github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../pkg/collector/check/defaults - -replace github.com/DataDog/datadog-agent/pkg/config/setup => ../../../pkg/config/setup - -replace github.com/DataDog/datadog-agent/pkg/util/log => ../../../pkg/util/log - -replace github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../pkg/util/filesystem - -replace github.com/DataDog/datadog-agent/pkg/util/optional => ../../../pkg/util/optional - -replace github.com/DataDog/datadog-agent/pkg/config/model => ../../../pkg/config/model - -replace github.com/DataDog/datadog-agent/pkg/util/system => ../../../pkg/util/system - -replace github.com/DataDog/datadog-agent/pkg/config/env => ../../../pkg/config/env - -replace github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../pkg/util/hostname/validate - -replace github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../pkg/util/winutil - -replace github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../pkg/util/pointer - -replace github.com/DataDog/datadog-agent/comp/core/secrets => ../../../comp/core/secrets - -replace github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../pkg/util/system/socket - -replace github.com/DataDog/datadog-agent/comp/api/api/def => ../../../comp/api/api/def - -replace github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../../comp/core/flare/builder - -replace github.com/DataDog/datadog-agent/comp/core/flare/types => 
../../../comp/core/flare/types - -replace github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../comp/core/telemetry - -replace github.com/DataDog/datadog-agent/comp/def => ../../../comp/def - -replace github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../pkg/util/fxutil - -replace github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../pkg/util/testutil +replace ( + github.com/DataDog/datadog-agent/comp/api/api/def => ../../../comp/api/api/def + github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../../comp/core/flare/builder + github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../comp/core/flare/types + github.com/DataDog/datadog-agent/comp/core/secrets => ../../../comp/core/secrets + github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../comp/core/telemetry + github.com/DataDog/datadog-agent/comp/def => ../../../comp/def + github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../pkg/collector/check/defaults + github.com/DataDog/datadog-agent/pkg/config/env => ../../../pkg/config/env + github.com/DataDog/datadog-agent/pkg/config/model => ../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel + github.com/DataDog/datadog-agent/pkg/config/setup => ../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig + github.com/DataDog/datadog-agent/pkg/util/executable => ../../../pkg/util/executable + github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../pkg/util/filesystem + github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../pkg/util/fxutil + github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../pkg/util/hostname/validate + github.com/DataDog/datadog-agent/pkg/util/log => ../../../pkg/util/log + github.com/DataDog/datadog-agent/pkg/util/optional => ../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../pkg/util/pointer + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../pkg/util/scrubber + github.com/DataDog/datadog-agent/pkg/util/system => ../../../pkg/util/system + github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../pkg/util/system/socket + github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../pkg/util/testutil + github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../pkg/util/winutil +) require ( github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 @@ -56,6 +39,8 @@ require ( github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect @@ -84,7 +69,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // 
indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -92,12 +77,12 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/config/mock/go.sum b/pkg/config/mock/go.sum index 77ba213060c82..765bdc23a7bf4 100644 --- a/pkg/config/mock/go.sum +++ b/pkg/config/mock/go.sum @@ -180,8 +180,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -251,15 +252,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= 
+golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -295,8 +296,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -312,8 +313,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/pkg/config/model/go.mod b/pkg/config/model/go.mod index 6d714751b915c..beb9f68bd7c75 100644 --- a/pkg/config/model/go.mod +++ b/pkg/config/model/go.mod @@ -3,6 +3,7 @@ module github.com/DataDog/datadog-agent/pkg/config/model go 1.22.0 replace ( + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure/ github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log/ github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber/ github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../util/system/socket/ diff --git a/pkg/config/model/viper.go b/pkg/config/model/viper.go index f4266c10de8f2..8b4477cdfe55f 100644 --- a/pkg/config/model/viper.go +++ b/pkg/config/model/viper.go @@ -161,12 +161,25 @@ func (c *safeConfig) SetDefault(key string, value interface{}) { c.Viper.SetDefault(key, value) } -// UnsetForSource wraps Viper for concurrent access +// UnsetForSource unsets a config entry for a given source func (c *safeConfig) UnsetForSource(key string, source Source) { + // modify the config then release the lock to avoid deadlocks while notifying + var receivers []NotificationReceiver c.Lock() - defer c.Unlock() + previousValue := 
c.Viper.Get(key) c.configSources[source].Set(key, nil) c.mergeViperInstances(key) + newValue := c.Viper.Get(key) // Can't use nil, so we get the newly computed value + if previousValue != nil { + // if the value has not changed, do not duplicate the slice so that no callback is called + receivers = slices.Clone(c.notificationReceivers) + } + c.Unlock() + + // notifying all receiver about the updated setting + for _, receiver := range receivers { + receiver(key, previousValue, newValue) + } } // mergeViperInstances is called after a change in an instance of Viper @@ -640,6 +653,8 @@ func (c *safeConfig) MergeConfig(in io.Reader) error { // MergeFleetPolicy merges the configuration from the reader given with an existing config // it overrides the existing values with the new ones in the FleetPolicies source, and updates the main config // according to sources priority order. +// +// Note: this should only be called at startup, as notifiers won't receive a notification when this loads func (c *safeConfig) MergeFleetPolicy(configPath string) error { c.Lock() defer c.Unlock() diff --git a/pkg/config/model/viper_test.go b/pkg/config/model/viper_test.go index 83bec5ee7a66b..22209d122c8fd 100644 --- a/pkg/config/model/viper_test.go +++ b/pkg/config/model/viper_test.go @@ -449,3 +449,20 @@ func TestParseEnvAsSliceMapString(t *testing.T) { t.Setenv("DD_MAP", "__some_data__") assert.Equal(t, []map[string]string{{"a": "a", "b": "b", "c": "c"}}, config.Get("map")) } + +func TestListenersUnsetForSource(t *testing.T) { + config := NewConfig("test", "DD", strings.NewReplacer(".", "_")) + + // Create a listener that will keep track of the changes + logLevels := []string{} + config.OnUpdate(func(_ string, _, next any) { + nextString := next.(string) + logLevels = append(logLevels, nextString) + }) + + config.Set("log_level", "info", SourceFile) + config.Set("log_level", "debug", SourceRC) + config.UnsetForSource("log_level", SourceRC) + + assert.Equal(t, []string{"info", "debug", "info"}, logLevels) +} diff --git a/pkg/config/nodetreemodel/config.go b/pkg/config/nodetreemodel/config.go new file mode 100644 index 0000000000000..2bef6c0b81c70 --- /dev/null +++ b/pkg/config/nodetreemodel/config.go @@ -0,0 +1,868 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
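The UnsetForSource change above now recomputes the key from the remaining sources and notifies OnUpdate receivers with the recomputed value, which is exactly what the new TestListenersUnsetForSource exercises. Below is a minimal sketch of that behavior from a caller's point of view; it assumes the model package's exported NewConfig constructor used by the test, and the log_level key is only an example.

package main

import (
	"fmt"
	"strings"

	"github.com/DataDog/datadog-agent/pkg/config/model"
)

func main() {
	cfg := model.NewConfig("test", "DD", strings.NewReplacer(".", "_"))

	// Record every value the OnUpdate receiver sees.
	var seen []string
	cfg.OnUpdate(func(_ string, _, next any) {
		seen = append(seen, next.(string))
	})

	cfg.Set("log_level", "info", model.SourceFile) // notifies with "info"
	cfg.Set("log_level", "debug", model.SourceRC)  // notifies with "debug"

	// Dropping the RC override falls back to the file value and, with this
	// change, notifies receivers again with the recomputed value.
	cfg.UnsetForSource("log_level", model.SourceRC)

	fmt.Println(seen) // [info debug info], matching the new test
}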
+ +// Package nodetreemodel defines a model for the config using a tree of nodes +package nodetreemodel + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "path" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "path/filepath" + + "github.com/DataDog/viper" + "github.com/mohae/deepcopy" + "github.com/spf13/afero" + "golang.org/x/exp/slices" + + "github.com/DataDog/datadog-agent/pkg/config/model" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// sources list the known sources, following the order of hierarchy between them +var sources = []model.Source{ + model.SourceDefault, + model.SourceUnknown, + model.SourceFile, + model.SourceEnvVar, + model.SourceFleetPolicies, + model.SourceAgentRuntime, + model.SourceLocalConfigProcess, + model.SourceRC, + model.SourceCLI, +} + +// safeConfig implements Config: +// - wraps viper with a safety lock +// - implements the additional DDHelpers +type safeConfig struct { + *viper.Viper + configSources map[model.Source]*viper.Viper + sync.RWMutex + envPrefix string + envKeyReplacer *strings.Replacer + + notificationReceivers []model.NotificationReceiver + + // Proxy settings + proxies *model.Proxy + + // configEnvVars is the set of env vars that are consulted for + // configuration values. + configEnvVars map[string]struct{} + + // keys that have been used but are unknown + // used to warn (a single time) on use + unknownKeys map[string]struct{} + + // extraConfigFilePaths represents additional configuration file paths that will be merged into the main configuration when ReadInConfig() is called. + extraConfigFilePaths []string +} + +// OnUpdate adds a callback to the list receivers to be called each time a value is changed in the configuration +// by a call to the 'Set' method. +// Callbacks are only called if the value is effectively changed. 
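The sources slice above fixes the precedence between configuration layers: when a key is set in several sources, the one listed later wins. A minimal sketch of how that ordering surfaces through Get and GetSource, assuming the NewConfig constructor defined at the end of this file; log_level is used purely as an example key.

package main

import (
	"fmt"
	"strings"

	"github.com/DataDog/datadog-agent/pkg/config/model"
	"github.com/DataDog/datadog-agent/pkg/config/nodetreemodel"
)

func main() {
	cfg := nodetreemodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_"))
	cfg.SetKnown("log_level")

	// SourceAgentRuntime is listed after SourceFile, so it takes precedence.
	cfg.Set("log_level", "info", model.SourceFile)
	cfg.Set("log_level", "debug", model.SourceAgentRuntime)
	fmt.Println(cfg.Get("log_level"), cfg.GetSource("log_level")) // debug, SourceAgentRuntime

	// Removing the higher-priority layer falls back to the file value.
	cfg.UnsetForSource("log_level", model.SourceAgentRuntime)
	fmt.Println(cfg.Get("log_level")) // info
}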
+func (c *safeConfig) OnUpdate(callback model.NotificationReceiver) { + c.Lock() + defer c.Unlock() + c.notificationReceivers = append(c.notificationReceivers, callback) +} + +// Set wraps Viper for concurrent access +func (c *safeConfig) Set(key string, newValue interface{}, source model.Source) { + if source == model.SourceDefault { + c.SetDefault(key, newValue) + return + } + + // modify the config then release the lock to avoid deadlocks while notifying + var receivers []model.NotificationReceiver + c.Lock() + previousValue := c.Viper.Get(key) + c.configSources[source].Set(key, newValue) + c.mergeViperInstances(key) + if !reflect.DeepEqual(previousValue, newValue) { + // if the value has not changed, do not duplicate the slice so that no callback is called + receivers = slices.Clone(c.notificationReceivers) + } + c.Unlock() + + // notifying all receiver about the updated setting + for _, receiver := range receivers { + receiver(key, previousValue, newValue) + } +} + +// SetWithoutSource sets the given value using source Unknown +func (c *safeConfig) SetWithoutSource(key string, value interface{}) { + c.Set(key, value, model.SourceUnknown) +} + +// SetDefault wraps Viper for concurrent access +func (c *safeConfig) SetDefault(key string, value interface{}) { + c.Lock() + defer c.Unlock() + c.configSources[model.SourceDefault].Set(key, value) + c.Viper.SetDefault(key, value) +} + +// UnsetForSource unsets a config entry for a given source +func (c *safeConfig) UnsetForSource(key string, source model.Source) { + // modify the config then release the lock to avoid deadlocks while notifying + var receivers []model.NotificationReceiver + c.Lock() + previousValue := c.Viper.Get(key) + c.configSources[source].Set(key, nil) + c.mergeViperInstances(key) + newValue := c.Viper.Get(key) // Can't use nil, so we get the newly computed value + if previousValue != nil { + // if the value has not changed, do not duplicate the slice so that no callback is called + receivers = slices.Clone(c.notificationReceivers) + } + c.Unlock() + + // notifying all receiver about the updated setting + for _, receiver := range receivers { + receiver(key, previousValue, newValue) + } +} + +// mergeViperInstances is called after a change in an instance of Viper +// to recompute the state of the main Viper +// (it must be used with a lock to prevent concurrent access to Viper) +func (c *safeConfig) mergeViperInstances(key string) { + var val interface{} + for _, source := range sources { + if currVal := c.configSources[source].Get(key); currVal != nil { + val = currVal + } + } + c.Viper.Set(key, val) +} + +// SetKnown adds a key to the set of known valid config keys +func (c *safeConfig) SetKnown(key string) { + c.Lock() + defer c.Unlock() + c.Viper.SetKnown(key) +} + +// IsKnown returns whether a key is known +func (c *safeConfig) IsKnown(key string) bool { + c.RLock() + defer c.RUnlock() + + return c.Viper.IsKnown(key) +} + +// checkKnownKey checks if a key is known, and if not logs a warning +// Only a single warning will be logged per unknown key. +// +// Must be called with the lock read-locked. +// The lock can be released and re-locked. 
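Set above only clones the receiver list when the value effectively changes, and it releases the write lock before running the callbacks, so a receiver can safely read other settings without deadlocking. A short sketch of both properties; the site and log_level keys are placeholders chosen for illustration.

package main

import (
	"fmt"
	"strings"

	"github.com/DataDog/datadog-agent/pkg/config/model"
	"github.com/DataDog/datadog-agent/pkg/config/nodetreemodel"
)

func main() {
	cfg := nodetreemodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_"))
	cfg.SetDefault("site", "datadoghq.com")

	calls := 0
	cfg.OnUpdate(func(_ string, _, _ any) {
		calls++
		// Safe: the write lock is released before notification, so reads
		// from inside a receiver do not deadlock.
		_ = cfg.GetString("site")
	})

	cfg.Set("log_level", "debug", model.SourceAgentRuntime) // value changed: callback runs
	cfg.Set("log_level", "debug", model.SourceAgentRuntime) // unchanged value: no callback
	fmt.Println(calls) // 1
}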
+func (c *safeConfig) checkKnownKey(key string) { + if c.Viper.IsKnown(key) { + return + } + + if _, ok := c.unknownKeys[key]; ok { + return + } + + // need to write-lock to add the key to the unknownKeys map + c.RUnlock() + // but we need to have the lock in the same state (RLocked) at the end of the function + defer c.RLock() + + c.Lock() + c.unknownKeys[key] = struct{}{} + c.Unlock() + + // log without holding the lock + log.Warnf("config key %v is unknown", key) +} + +// GetKnownKeysLowercased returns all the keys that meet at least one of these criteria: +// 1) have a default, 2) have an environment variable binded or 3) have been SetKnown() +// Note that it returns the keys lowercased. +func (c *safeConfig) GetKnownKeysLowercased() map[string]interface{} { + c.RLock() + defer c.RUnlock() + + // GetKnownKeysLowercased returns a fresh map, so the caller may do with it + // as they please without holding the lock. + return c.Viper.GetKnownKeys() +} + +// ParseEnvAsStringSlice registers a transformer function to parse an an environment variables as a []string. +func (c *safeConfig) ParseEnvAsStringSlice(key string, fn func(string) []string) { + c.Lock() + defer c.Unlock() + c.Viper.SetEnvKeyTransformer(key, func(data string) interface{} { return fn(data) }) +} + +// ParseEnvAsMapStringInterface registers a transformer function to parse an an environment variables as a +// map[string]interface{}. +func (c *safeConfig) ParseEnvAsMapStringInterface(key string, fn func(string) map[string]interface{}) { + c.Lock() + defer c.Unlock() + c.Viper.SetEnvKeyTransformer(key, func(data string) interface{} { return fn(data) }) +} + +// ParseEnvAsSliceMapString registers a transformer function to parse an an environment variables as a []map[string]string. +func (c *safeConfig) ParseEnvAsSliceMapString(key string, fn func(string) []map[string]string) { + c.Lock() + defer c.Unlock() + c.Viper.SetEnvKeyTransformer(key, func(data string) interface{} { return fn(data) }) +} + +// ParseEnvAsSlice registers a transformer function to parse an an environment variables as a +// []interface{}. 
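The ParseEnvAs* helpers above register a per-key transformer so that a single environment variable can be decoded into a richer type than a plain string. A sketch for the []string case; the tags key, the DD_TAGS variable and the comma-separated format are only illustrations, and the sketch assumes the DataDog/viper fork applies the transformer when a bound variable is read, as the SetEnvKeyTransformer calls above suggest.

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/DataDog/datadog-agent/pkg/config/nodetreemodel"
)

func main() {
	cfg := nodetreemodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_"))

	// Bind the key to the derived DD_TAGS variable and register how to split it.
	cfg.BindEnv("tags")
	cfg.ParseEnvAsStringSlice("tags", func(raw string) []string {
		return strings.Split(raw, ",")
	})

	os.Setenv("DD_TAGS", "env:prod,team:agent")
	fmt.Println(cfg.GetStringSlice("tags")) // [env:prod team:agent]
}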
+func (c *safeConfig) ParseEnvAsSlice(key string, fn func(string) []interface{}) { + c.Lock() + defer c.Unlock() + c.Viper.SetEnvKeyTransformer(key, func(data string) interface{} { return fn(data) }) +} + +// SetFs wraps Viper for concurrent access +func (c *safeConfig) SetFs(fs afero.Fs) { + c.Lock() + defer c.Unlock() + c.Viper.SetFs(fs) +} + +// IsSet wraps Viper for concurrent access +func (c *safeConfig) IsSet(key string) bool { + c.RLock() + defer c.RUnlock() + return c.Viper.IsSet(key) +} + +func (c *safeConfig) AllKeysLowercased() []string { + c.RLock() + defer c.RUnlock() + return c.Viper.AllKeys() +} + +// Get wraps Viper for concurrent access +func (c *safeConfig) Get(key string) interface{} { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + val, err := c.Viper.GetE(key) + if err != nil { + log.Warnf("failed to get configuration value for key %q: %s", key, err) + } + return deepcopy.Copy(val) +} + +// GetAllSources returns the value of a key for each source +func (c *safeConfig) GetAllSources(key string) []model.ValueWithSource { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + vals := make([]model.ValueWithSource, len(sources)) + for i, source := range sources { + vals[i] = model.ValueWithSource{ + Source: source, + Value: deepcopy.Copy(c.configSources[source].Get(key)), + } + } + return vals +} + +// GetString wraps Viper for concurrent access +func (c *safeConfig) GetString(key string) string { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + val, err := c.Viper.GetStringE(key) + if err != nil { + log.Warnf("failed to get configuration value for key %q: %s", key, err) + } + return val +} + +// GetBool wraps Viper for concurrent access +func (c *safeConfig) GetBool(key string) bool { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + val, err := c.Viper.GetBoolE(key) + if err != nil { + log.Warnf("failed to get configuration value for key %q: %s", key, err) + } + return val +} + +// GetInt wraps Viper for concurrent access +func (c *safeConfig) GetInt(key string) int { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + val, err := c.Viper.GetIntE(key) + if err != nil { + log.Warnf("failed to get configuration value for key %q: %s", key, err) + } + return val +} + +// GetInt32 wraps Viper for concurrent access +func (c *safeConfig) GetInt32(key string) int32 { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + val, err := c.Viper.GetInt32E(key) + if err != nil { + log.Warnf("failed to get configuration value for key %q: %s", key, err) + } + return val +} + +// GetInt64 wraps Viper for concurrent access +func (c *safeConfig) GetInt64(key string) int64 { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + val, err := c.Viper.GetInt64E(key) + if err != nil { + log.Warnf("failed to get configuration value for key %q: %s", key, err) + } + return val +} + +// GetFloat64 wraps Viper for concurrent access +func (c *safeConfig) GetFloat64(key string) float64 { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + val, err := c.Viper.GetFloat64E(key) + if err != nil { + log.Warnf("failed to get configuration value for key %q: %s", key, err) + } + return val +} + +// GetTime wraps Viper for concurrent access +func (c *safeConfig) GetTime(key string) time.Time { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + val, err := c.Viper.GetTimeE(key) + if err != nil { + log.Warnf("failed to get configuration value for key %q: %s", key, err) + } + return val +} + +// GetDuration wraps Viper for concurrent access +func (c *safeConfig) 
GetDuration(key string) time.Duration { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + val, err := c.Viper.GetDurationE(key) + if err != nil { + log.Warnf("failed to get configuration value for key %q: %s", key, err) + } + return val +} + +// GetStringSlice wraps Viper for concurrent access +func (c *safeConfig) GetStringSlice(key string) []string { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + val, err := c.Viper.GetStringSliceE(key) + if err != nil { + log.Warnf("failed to get configuration value for key %q: %s", key, err) + } + return slices.Clone(val) +} + +// GetFloat64SliceE loads a key as a []float64 +func (c *safeConfig) GetFloat64SliceE(key string) ([]float64, error) { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + + // We're using GetStringSlice because viper can only parse list of string from env variables + list, err := c.Viper.GetStringSliceE(key) + if err != nil { + return nil, fmt.Errorf("'%v' is not a list", key) + } + + res := []float64{} + for _, item := range list { + nb, err := strconv.ParseFloat(item, 64) + if err != nil { + return nil, fmt.Errorf("value '%v' from '%v' is not a float64", item, key) + } + res = append(res, nb) + } + return res, nil +} + +// GetStringMap wraps Viper for concurrent access +func (c *safeConfig) GetStringMap(key string) map[string]interface{} { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + val, err := c.Viper.GetStringMapE(key) + if err != nil { + log.Warnf("failed to get configuration value for key %q: %s", key, err) + } + return deepcopy.Copy(val).(map[string]interface{}) +} + +// GetStringMapString wraps Viper for concurrent access +func (c *safeConfig) GetStringMapString(key string) map[string]string { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + val, err := c.Viper.GetStringMapStringE(key) + if err != nil { + log.Warnf("failed to get configuration value for key %q: %s", key, err) + } + return deepcopy.Copy(val).(map[string]string) +} + +// GetStringMapStringSlice wraps Viper for concurrent access +func (c *safeConfig) GetStringMapStringSlice(key string) map[string][]string { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + val, err := c.Viper.GetStringMapStringSliceE(key) + if err != nil { + log.Warnf("failed to get configuration value for key %q: %s", key, err) + } + return deepcopy.Copy(val).(map[string][]string) +} + +// GetSizeInBytes wraps Viper for concurrent access +func (c *safeConfig) GetSizeInBytes(key string) uint { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + val, err := c.Viper.GetSizeInBytesE(key) + if err != nil { + log.Warnf("failed to get configuration value for key %q: %s", key, err) + } + return val +} + +// GetSource wraps Viper for concurrent access +func (c *safeConfig) GetSource(key string) model.Source { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + var source model.Source + for _, s := range sources { + if c.configSources[s].Get(key) != nil { + source = s + } + } + return source +} + +// SetEnvPrefix wraps Viper for concurrent access, and keeps the envPrefix for +// future reference +func (c *safeConfig) SetEnvPrefix(in string) { + c.Lock() + defer c.Unlock() + c.configSources[model.SourceEnvVar].SetEnvPrefix(in) + c.Viper.SetEnvPrefix(in) + c.envPrefix = in +} + +// mergeWithEnvPrefix derives the environment variable that Viper will use for a given key. +// mergeWithEnvPrefix must be called while holding the config log (read or write). 
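mergeWithEnvPrefix and the BindEnv hunk that follows determine which environment variables back a key: a single argument gets the env prefix, upper-casing and the key replacer applied, while any additional arguments are taken as literal variable names for the same key. A short sketch of that derivation; the keys are examples only.

package main

import (
	"fmt"
	"strings"

	"github.com/DataDog/datadog-agent/pkg/config/nodetreemodel"
)

func main() {
	cfg := nodetreemodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_"))

	// One argument: the variable name is derived (DD_ prefix, upper-cased,
	// dots replaced), so this key is backed by DD_LOGS_CONFIG_ENABLED.
	cfg.BindEnv("logs_config.enabled")

	// Extra arguments are used verbatim as variable names for the same key.
	cfg.BindEnv("proxy.https", "DD_PROXY_HTTPS", "HTTPS_PROXY")

	// GetEnvVars reports every variable the config now consults.
	fmt.Println(cfg.GetEnvVars())
}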
+func (c *safeConfig) mergeWithEnvPrefix(key string) string { + return strings.Join([]string{c.envPrefix, strings.ToUpper(key)}, "_") +} + +// BindEnv wraps Viper for concurrent access, and adds tracking of the configurable env vars +func (c *safeConfig) BindEnv(input ...string) { + c.Lock() + defer c.Unlock() + var envKeys []string + + // If one input is given, viper derives an env key from it; otherwise, all inputs after + // the first are literal env vars. + if len(input) == 1 { + envKeys = []string{c.mergeWithEnvPrefix(input[0])} + } else { + envKeys = input[1:] + } + + for _, key := range envKeys { + // apply EnvKeyReplacer to each key + if c.envKeyReplacer != nil { + key = c.envKeyReplacer.Replace(key) + } + c.configEnvVars[key] = struct{}{} + } + + _ = c.configSources[model.SourceEnvVar].BindEnv(input...) + _ = c.Viper.BindEnv(input...) +} + +// SetEnvKeyReplacer wraps Viper for concurrent access +func (c *safeConfig) SetEnvKeyReplacer(r *strings.Replacer) { + c.Lock() + defer c.Unlock() + c.configSources[model.SourceEnvVar].SetEnvKeyReplacer(r) + c.Viper.SetEnvKeyReplacer(r) + c.envKeyReplacer = r +} + +// UnmarshalKey wraps Viper for concurrent access +func (c *safeConfig) UnmarshalKey(key string, rawVal interface{}, opts ...viper.DecoderConfigOption) error { + c.RLock() + defer c.RUnlock() + c.checkKnownKey(key) + return c.Viper.UnmarshalKey(key, rawVal, opts...) +} + +// Unmarshal wraps Viper for concurrent access +func (c *safeConfig) Unmarshal(rawVal interface{}) error { + c.RLock() + defer c.RUnlock() + return c.Viper.Unmarshal(rawVal) +} + +// UnmarshalExact wraps Viper for concurrent access +func (c *safeConfig) UnmarshalExact(rawVal interface{}) error { + c.RLock() + defer c.RUnlock() + return c.Viper.UnmarshalExact(rawVal) +} + +// ReadInConfig wraps Viper for concurrent access +func (c *safeConfig) ReadInConfig() error { + c.Lock() + defer c.Unlock() + // ReadInConfig reset configuration with the main config file + err := errors.Join(c.Viper.ReadInConfig(), c.configSources[model.SourceFile].ReadInConfig()) + if err != nil { + return err + } + + type extraConf struct { + path string + content []byte + } + + // Read extra config files + extraConfContents := []extraConf{} + for _, path := range c.extraConfigFilePaths { + b, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("could not read extra config file '%s': %w", path, err) + } + extraConfContents = append(extraConfContents, extraConf{path: path, content: b}) + } + + // Merge with base config and 'file' config + for _, confFile := range extraConfContents { + err = errors.Join(c.Viper.MergeConfig(bytes.NewReader(confFile.content)), c.configSources[model.SourceFile].MergeConfig(bytes.NewReader(confFile.content))) + if err != nil { + return fmt.Errorf("error merging %s config file: %w", confFile.path, err) + } + log.Infof("extra configuration file %s was loaded successfully", confFile.path) + } + return nil +} + +// ReadConfig wraps Viper for concurrent access +func (c *safeConfig) ReadConfig(in io.Reader) error { + c.Lock() + defer c.Unlock() + b, err := io.ReadAll(in) + if err != nil { + return err + } + err = c.Viper.ReadConfig(bytes.NewReader(b)) + if err != nil { + return err + } + return c.configSources[model.SourceFile].ReadConfig(bytes.NewReader(b)) +} + +// MergeConfig wraps Viper for concurrent access +func (c *safeConfig) MergeConfig(in io.Reader) error { + c.Lock() + defer c.Unlock() + return c.Viper.MergeConfig(in) +} + +// MergeFleetPolicy merges the configuration from the reader given with an 
existing config +// it overrides the existing values with the new ones in the FleetPolicies source, and updates the main config +// according to sources priority order. +// +// Note: this should only be called at startup, as notifiers won't receive a notification when this loads +func (c *safeConfig) MergeFleetPolicy(configPath string) error { + c.Lock() + defer c.Unlock() + + // Check file existence & open it + _, err := os.Stat(configPath) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to open config file %s: %w", configPath, err) + } else if err != nil && os.IsNotExist(err) { + return nil + } + in, err := os.Open(configPath) + if err != nil { + return fmt.Errorf("unable to open config file %s: %w", configPath, err) + } + defer in.Close() + + c.configSources[model.SourceFleetPolicies].SetConfigType("yaml") + err = c.configSources[model.SourceFleetPolicies].MergeConfigOverride(in) + if err != nil { + return err + } + for _, key := range c.configSources[model.SourceFleetPolicies].AllKeys() { + c.mergeViperInstances(key) + } + log.Infof("Fleet policies configuration %s successfully merged", path.Base(configPath)) + return nil +} + +// MergeConfigMap merges the configuration from the map given with an existing config. +// Note that the map given may be modified. +func (c *safeConfig) MergeConfigMap(cfg map[string]any) error { + c.Lock() + defer c.Unlock() + return c.Viper.MergeConfigMap(cfg) +} + +// AllSettings wraps Viper for concurrent access +func (c *safeConfig) AllSettings() map[string]interface{} { + c.RLock() + defer c.RUnlock() + + // AllSettings returns a fresh map, so the caller may do with it + // as they please without holding the lock. + return c.Viper.AllSettings() +} + +// AllSettingsWithoutDefault returns a copy of the all the settings in the configuration without defaults +func (c *safeConfig) AllSettingsWithoutDefault() map[string]interface{} { + c.RLock() + defer c.RUnlock() + + // AllSettingsWithoutDefault returns a fresh map, so the caller may do with it + // as they please without holding the lock. + return c.Viper.AllSettingsWithoutDefault() +} + +// AllSettingsBySource returns the settings from each source (file, env vars, ...) +func (c *safeConfig) AllSettingsBySource() map[model.Source]interface{} { + c.RLock() + defer c.RUnlock() + + sources := []model.Source{ + model.SourceDefault, + model.SourceUnknown, + model.SourceFile, + model.SourceEnvVar, + model.SourceFleetPolicies, + model.SourceAgentRuntime, + model.SourceRC, + model.SourceCLI, + model.SourceLocalConfigProcess, + } + res := map[model.Source]interface{}{} + for _, source := range sources { + res[source] = c.configSources[source].AllSettingsWithoutDefault() + } + res[model.SourceProvided] = c.Viper.AllSettingsWithoutDefault() + return res +} + +// AddConfigPath wraps Viper for concurrent access +func (c *safeConfig) AddConfigPath(in string) { + c.Lock() + defer c.Unlock() + c.configSources[model.SourceFile].AddConfigPath(in) + c.Viper.AddConfigPath(in) +} + +// AddExtraConfigPaths allows adding additional configuration files +// which will be merged into the main configuration during the ReadInConfig call. +// Configuration files are merged sequentially. If a key already exists and the foreign value type matches the existing one, the foreign value overrides it. +// If both the existing value and the new value are nested configurations, they are merged recursively following the same principles. 
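AddExtraConfigPaths, whose body follows, records additional files that ReadInConfig later merges on top of the main configuration file in the order they were added. A sketch of the intended call sequence; the file paths are hypothetical, and ReadInConfig simply returns an error if they do not exist.

package main

import (
	"fmt"
	"strings"

	"github.com/DataDog/datadog-agent/pkg/config/nodetreemodel"
)

func main() {
	cfg := nodetreemodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_"))
	cfg.SetConfigType("yaml")
	cfg.SetConfigFile("/etc/datadog-agent/datadog.yaml") // hypothetical main config

	// Extra files are merged into both the main Viper and the SourceFile layer
	// when ReadInConfig runs; later files override earlier ones key by key.
	if err := cfg.AddExtraConfigPaths([]string{"/etc/datadog-agent/extra.yaml"}); err != nil {
		fmt.Println(err)
		return
	}
	if err := cfg.ReadInConfig(); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(cfg.ExtraConfigFilesUsed())
}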
+func (c *safeConfig) AddExtraConfigPaths(ins []string) error { + if len(ins) == 0 { + return nil + } + c.Lock() + defer c.Unlock() + var pathsToAdd []string + var errs []error + for _, in := range ins { + in, err := filepath.Abs(in) + if err != nil { + errs = append(errs, fmt.Errorf("could not get absolute path of extra config file '%s': %s", in, err)) + continue + } + if slices.Index(c.extraConfigFilePaths, in) == -1 && slices.Index(pathsToAdd, in) == -1 { + pathsToAdd = append(pathsToAdd, in) + } + } + err := errors.Join(errs...) + if err == nil { + c.extraConfigFilePaths = append(c.extraConfigFilePaths, pathsToAdd...) + } + return err +} + +// SetConfigName wraps Viper for concurrent access +func (c *safeConfig) SetConfigName(in string) { + c.Lock() + defer c.Unlock() + c.configSources[model.SourceFile].SetConfigName(in) + c.Viper.SetConfigName(in) +} + +// SetConfigFile wraps Viper for concurrent access +func (c *safeConfig) SetConfigFile(in string) { + c.Lock() + defer c.Unlock() + c.configSources[model.SourceFile].SetConfigFile(in) + c.Viper.SetConfigFile(in) +} + +// SetConfigType wraps Viper for concurrent access +func (c *safeConfig) SetConfigType(in string) { + c.Lock() + defer c.Unlock() + c.configSources[model.SourceFile].SetConfigType(in) + c.Viper.SetConfigType(in) +} + +// ConfigFileUsed wraps Viper for concurrent access +func (c *safeConfig) ConfigFileUsed() string { + c.RLock() + defer c.RUnlock() + return c.Viper.ConfigFileUsed() +} + +func (c *safeConfig) SetTypeByDefaultValue(in bool) { + c.Lock() + defer c.Unlock() + for _, source := range sources { + c.configSources[source].SetTypeByDefaultValue(in) + } + c.Viper.SetTypeByDefaultValue(in) +} + +// GetEnvVars implements the Config interface +func (c *safeConfig) GetEnvVars() []string { + c.RLock() + defer c.RUnlock() + vars := make([]string, 0, len(c.configEnvVars)) + for v := range c.configEnvVars { + vars = append(vars, v) + } + return vars +} + +// BindEnvAndSetDefault implements the Config interface +func (c *safeConfig) BindEnvAndSetDefault(key string, val interface{}, env ...string) { + c.SetDefault(key, val) + c.BindEnv(append([]string{key}, env...)...) //nolint:errcheck +} + +func (c *safeConfig) Warnings() *model.Warnings { + return nil +} + +func (c *safeConfig) Object() model.Reader { + return c +} + +// NewConfig returns a new Config object. +func NewConfig(name string, envPrefix string, envKeyReplacer *strings.Replacer) model.Config { + config := safeConfig{ + Viper: viper.New(), + configSources: map[model.Source]*viper.Viper{}, + configEnvVars: map[string]struct{}{}, + unknownKeys: map[string]struct{}{}, + } + + // load one Viper instance per source of setting change + for _, source := range sources { + config.configSources[source] = viper.New() + } + + config.SetTypeByDefaultValue(true) + config.SetConfigName(name) + config.SetEnvPrefix(envPrefix) + config.SetEnvKeyReplacer(envKeyReplacer) + + return &config +} + +// CopyConfig copies the given config to the receiver config. This should only be used in tests as replacing +// the global config reference is unsafe. 
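NewConfig at the end of this file wires one Viper instance per source, and BindEnvAndSetDefault is the one-call way to register a default and bind the derived env var. GetAllSources then exposes what each layer holds, which helps when debugging precedence. A brief sketch; the dogstatsd_port key is only an example.

package main

import (
	"fmt"
	"strings"

	"github.com/DataDog/datadog-agent/pkg/config/model"
	"github.com/DataDog/datadog-agent/pkg/config/nodetreemodel"
)

func main() {
	cfg := nodetreemodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_"))

	// Default value plus a derived DD_DOGSTATSD_PORT binding in a single call.
	cfg.BindEnvAndSetDefault("dogstatsd_port", 8125)

	// Override at runtime, then inspect what every source holds.
	cfg.Set("dogstatsd_port", 8126, model.SourceAgentRuntime)
	for _, vs := range cfg.GetAllSources("dogstatsd_port") {
		fmt.Printf("%v => %v\n", vs.Source, vs.Value) // default => 8125, agent-runtime => 8126, others => <nil>
	}
}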
+func (c *safeConfig) CopyConfig(cfg model.Config) { + c.Lock() + defer c.Unlock() + + if cfg, ok := cfg.(*safeConfig); ok { + c.Viper = cfg.Viper + c.configSources = cfg.configSources + c.envPrefix = cfg.envPrefix + c.envKeyReplacer = cfg.envKeyReplacer + c.proxies = cfg.proxies + c.configEnvVars = cfg.configEnvVars + c.unknownKeys = cfg.unknownKeys + c.notificationReceivers = cfg.notificationReceivers + return + } + panic("Replacement config must be an instance of safeConfig") +} + +// GetProxies returns the proxy settings from the configuration +func (c *safeConfig) GetProxies() *model.Proxy { + c.Lock() + defer c.Unlock() + if c.proxies != nil { + return c.proxies + } + if c.Viper.GetBool("fips.enabled") { + return nil + } + if !c.Viper.IsSet("proxy.http") && !c.Viper.IsSet("proxy.https") && !c.Viper.IsSet("proxy.no_proxy") { + return nil + } + p := &model.Proxy{ + HTTP: c.Viper.GetString("proxy.http"), + HTTPS: c.Viper.GetString("proxy.https"), + NoProxy: c.Viper.GetStringSlice("proxy.no_proxy"), + } + + c.proxies = p + return c.proxies +} + +func (c *safeConfig) ExtraConfigFilesUsed() []string { + c.Lock() + defer c.Unlock() + res := make([]string, len(c.extraConfigFilePaths)) + copy(res, c.extraConfigFilePaths) + return res +} diff --git a/pkg/config/nodetreemodel/go.mod b/pkg/config/nodetreemodel/go.mod new file mode 100644 index 0000000000000..cdb7d30bdb4b6 --- /dev/null +++ b/pkg/config/nodetreemodel/go.mod @@ -0,0 +1,35 @@ +module github.com/DataDog/datadog-agent/pkg/config/nodetreemodel + +go 1.22.0 + +replace ( + github.com/DataDog/datadog-agent/pkg/config/model => ../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/util/log => ../../../pkg/util/log + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../pkg/util/scrubber +) + +require ( + github.com/DataDog/datadog-agent/pkg/config/model v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/viper v1.13.5 + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 + github.com/spf13/afero v1.11.0 + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 +) + +require ( + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/fsnotify/fsnotify v1.4.7 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/magiconair/properties v1.8.1 // indirect + github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect + github.com/spf13/cast v1.3.0 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/spf13/pflag v1.0.3 // indirect + go.uber.org/atomic v1.11.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/text v0.17.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) diff --git a/pkg/config/nodetreemodel/go.sum b/pkg/config/nodetreemodel/go.sum new file mode 100644 index 0000000000000..c576ff14e9a72 --- /dev/null +++ b/pkg/config/nodetreemodel/go.sum @@ -0,0 +1,254 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= +github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/OneOfOne/xxhash v1.2.2/go.mod 
h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= 
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool 
v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod 
h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod 
h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/pkg/config/remote/client/client.go b/pkg/config/remote/client/client.go index a97cecda5f899..124ce3de40f59 100644 --- a/pkg/config/remote/client/client.go +++ 
b/pkg/config/remote/client/client.go @@ -46,6 +46,12 @@ type ConfigFetcher interface { ClientGetConfigs(context.Context, *pbgo.ClientGetConfigsRequest) (*pbgo.ClientGetConfigsResponse, error) } +// Listener defines the interface of a remote config listener +type Listener interface { + OnUpdate(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus)) + OnStateChange(bool) +} + // fetchConfigs defines the function that an agent client uses to get config updates type fetchConfigs func(context.Context, *pbgo.ClientGetConfigsRequest, ...grpc.CallOption) (*pbgo.ClientGetConfigsResponse, error) @@ -69,7 +75,7 @@ type Client struct { state *state.Repository - listeners map[string][]func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)) + listeners map[string][]Listener // Elements that can be changed during the execution of listeners // They are atomics so that they don't have to share the top-level mutex @@ -160,9 +166,6 @@ func (g *agentGRPCConfigFetcher) ClientGetConfigs(ctx context.Context, request * return g.fetchConfigs(ctx, request) } -// Handler is a function that is called when a config update is received. -type Handler func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)) - // NewClient creates a new client func NewClient(updater ConfigFetcher, opts ...func(o *Options)) (*Client, error) { return newClient(updater, opts...) @@ -289,7 +292,7 @@ func newClient(cf ConfigFetcher, opts ...func(opts *Options)) (*Client, error) { installerState: installerState, state: repository, backoffPolicy: backoffPolicy, - listeners: make(map[string][]func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))), + listeners: make(map[string][]Listener), configFetcher: cf, }, nil } @@ -324,8 +327,8 @@ func (c *Client) SetAgentName(agentName string) { } } -// Subscribe subscribes to config updates of a product. -func (c *Client) Subscribe(product string, fn func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))) { +// SubscribeAll subscribes to all events (config updates, state changed, ...) +func (c *Client) SubscribeAll(product string, listener Listener) { c.m.Lock() defer c.m.Unlock() @@ -341,7 +344,12 @@ func (c *Client) Subscribe(product string, fn func(update map[string]state.RawCo c.products = append(c.products, product) } - c.listeners[product] = append(c.listeners[product], fn) + c.listeners[product] = append(c.listeners[product], listener) +} + +// Subscribe subscribes to config updates of a product. +func (c *Client) Subscribe(product string, cb func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))) { + c.SubscribeAll(product, NewUpdateListener(cb)) } // GetConfigs returns the current configs applied of a product. 
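For reference, a minimal sketch of how a product could consume the Listener interface and SubscribeAll entry point introduced above. The product name, the listener type, and the use of state.ApplyStateAcknowledged are illustrative assumptions, not part of the change:

```go
package example

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/config/remote/client"
	"github.com/DataDog/datadog-agent/pkg/remoteconfig/state"
)

// exampleListener is an illustrative implementation of the new Listener interface.
type exampleListener struct{}

// OnUpdate receives the changed configs for the subscribed product and reports
// an apply status back for each of them.
func (exampleListener) OnUpdate(updates map[string]state.RawConfig, applyStateCallback func(cfgPath string, status state.ApplyStatus)) {
	for path := range updates {
		// A real listener would parse and apply the config before acknowledging it.
		applyStateCallback(path, state.ApplyStatus{State: state.ApplyStateAcknowledged})
	}
}

// OnStateChange is notified when the client loses (false) or regains (true)
// the ability to refresh its remote-config state.
func (exampleListener) OnStateChange(ok bool) {
	fmt.Println("remote config polling healthy:", ok)
}

// register wires the listener up through the new SubscribeAll entry point;
// "EXAMPLE_PRODUCT" is a placeholder product name.
func register(rcClient *client.Client) {
	rcClient.SubscribeAll("EXAMPLE_PRODUCT", exampleListener{})
}
```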
@@ -428,11 +436,29 @@ func (c *Client) pollLoop() { log.Infof("retrying the first update of remote-config state (%v)", err) } } else { + c.m.Lock() + for _, productListeners := range c.listeners { + for _, listener := range productListeners { + listener.OnStateChange(false) + } + } + c.m.Unlock() + c.lastUpdateError = err c.backoffErrorCount = c.backoffPolicy.IncError(c.backoffErrorCount) log.Errorf("could not update remote-config state: %v", c.lastUpdateError) } } else { + if c.lastUpdateError != nil { + c.m.Lock() + for _, productListeners := range c.listeners { + for _, listener := range productListeners { + listener.OnStateChange(true) + } + } + c.m.Unlock() + } + c.lastUpdateError = nil successfulFirstRun = true c.backoffErrorCount = c.backoffPolicy.DecError(c.backoffErrorCount) @@ -470,7 +496,7 @@ func (c *Client) update() error { for product, productListeners := range c.listeners { if containsProduct(changedProducts, product) { for _, listener := range productListeners { - listener(c.state.GetConfigs(product), c.state.UpdateApplyStatus) + listener.OnUpdate(c.state.GetConfigs(product), c.state.UpdateApplyStatus) } } } @@ -594,6 +620,33 @@ func (c *Client) newUpdateRequest() (*pbgo.ClientGetConfigsRequest, error) { return req, nil } +type listener struct { + onUpdate func(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus)) + onStateChange func(bool) +} + +func (l *listener) OnUpdate(configs map[string]state.RawConfig, cb func(cfgPath string, status state.ApplyStatus)) { + if l.onUpdate != nil { + l.onUpdate(configs, cb) + } +} + +func (l *listener) OnStateChange(state bool) { + if l.onStateChange != nil { + l.onStateChange(state) + } +} + +// NewUpdateListener creates a remote config listener from an update callback +func NewUpdateListener(onUpdate func(updates map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))) Listener { + return &listener{onUpdate: onUpdate} +} + +// NewListener creates a remote config listener from a pair of update and state change callbacks +func NewListener(onUpdate func(updates map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)), onStateChange func(bool)) Listener { + return &listener{onUpdate: onUpdate, onStateChange: onStateChange} +} + var ( idSize = 21 idAlphabet = []rune("_-0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") diff --git a/pkg/config/remote/go.mod b/pkg/config/remote/go.mod index 87dbbcf5ebb2a..4cd1b9833f550 100644 --- a/pkg/config/remote/go.mod +++ b/pkg/config/remote/go.mod @@ -6,6 +6,7 @@ replace ( github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../comp/core/telemetry github.com/DataDog/datadog-agent/comp/def => ../../../comp/def github.com/DataDog/datadog-agent/pkg/config/model => ../model + github.com/DataDog/datadog-agent/pkg/obfuscate => ../../obfuscate github.com/DataDog/datadog-agent/pkg/proto => ../../proto github.com/DataDog/datadog-agent/pkg/remoteconfig/state => ../../remoteconfig/state github.com/DataDog/datadog-agent/pkg/telemetry => ../../telemetry @@ -36,14 +37,27 @@ require ( go.etcd.io/bbolt v1.3.7 go.uber.org/atomic v1.11.0 google.golang.org/protobuf v1.33.0 + gopkg.in/DataDog/dd-trace-go.v1 v1.67.0 ) require ( + github.com/DataDog/appsec-internal-go v1.7.0 // indirect + github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 // indirect github.com/DataDog/datadog-agent/pkg/util/cache v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-go/v5 v5.5.0 // indirect + github.com/DataDog/go-libddwaf/v3 v3.3.0 // indirect
+ github.com/DataDog/go-sqllexer v0.0.15 // indirect + github.com/DataDog/sketches-go v1.4.5 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/ebitengine/purego v0.6.0-alpha.5 // indirect github.com/go-ole/go-ole v1.2.6 // indirect + github.com/google/uuid v1.5.0 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/outcaste-io/ristretto v0.2.3 // indirect github.com/patrickmn/go-cache v2.1.0+incompatible // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect @@ -53,6 +67,11 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect + golang.org/x/mod v0.20.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.24.0 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect ) require ( @@ -60,17 +79,17 @@ require ( github.com/DataDog/go-tuf v1.1.0-0.5.2 github.com/DataDog/viper v1.13.5 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/fsnotify/fsnotify v1.4.7 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/magiconair/properties v1.8.1 // indirect - github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/pelletier/go-toml v1.2.0 // indirect github.com/philhofer/fwd v1.1.2 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/spf13/afero v1.9.5 // indirect github.com/spf13/cast v1.3.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect diff --git a/pkg/config/remote/go.sum b/pkg/config/remote/go.sum index 61c0ef33e4625..270cfceba1d7b 100644 --- a/pkg/config/remote/go.sum +++ b/pkg/config/remote/go.sum @@ -44,12 +44,27 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/appsec-internal-go v1.7.0 h1:iKRNLih83dJeVya3IoUfK+6HLD/hQsIbyBlfvLmAeb0= +github.com/DataDog/appsec-internal-go v1.7.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g= +github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= +github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= +github.com/DataDog/go-libddwaf/v3 v3.3.0 h1:jS72fuQpFgJZEdEJDmHJCPAgNTEMZoz1EUvimPUOiJ4= +github.com/DataDog/go-libddwaf/v3 v3.3.0/go.mod h1:Bz/0JkpGf689mzbUjKJeheJINqsyyhM8p9PDuHdK2Ec= 
+github.com/DataDog/go-sqllexer v0.0.15 h1:rUUu52dP8EQhJLnUw0MIAxZp0BQx2fOTuMztr3vtHUU= +github.com/DataDog/go-sqllexer v0.0.15/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= +github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4= +github.com/DataDog/gostackparse v0.7.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM= +github.com/DataDog/sketches-go v1.4.5 h1:ki7VfeNz7IcNafq7yI/j5U/YCkO3LJiMDtXz9OMQbyE= +github.com/DataDog/sketches-go v1.4.5/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -66,6 +81,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -85,10 +102,21 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 
h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 h1:8EXxF+tCLqaVk8AOC29zl2mnhQjwyLxxOTuhUazWRsg= +github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4/go.mod h1:I5sHm0Y0T1u5YjlyqC5GVArM7aNZRUYtTjmJ8mPJFds= +github.com/ebitengine/purego v0.6.0-alpha.5 h1:EYID3JOAdmQ4SNZYJHu9V6IqOeRQDBYxqKAg9PyoHFY= +github.com/ebitengine/purego v0.6.0-alpha.5/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2idgGslMwQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -98,6 +126,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ= +github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -123,6 +153,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -170,8 +201,12 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBBos92HalKpaGKHrp+3Uo6yTodo= 
+github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= @@ -185,10 +220,17 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -215,9 +257,12 @@ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= 
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -227,6 +272,10 @@ github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwd github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0= +github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= @@ -239,8 +288,9 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -260,12 +310,18 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 h1:4+LEVOB87y175cLJC/mbsgKmoDOjrBldtXvioEy96WY= +github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3/go.mod 
h1:vl5+MqJ1nBINuSsUI2mGgH79UweUT/B5Fy8857PqyyI= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg= github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI= @@ -276,10 +332,13 @@ github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= @@ -304,6 +363,7 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= @@ -325,6 +385,7 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= @@ -339,8 +400,11 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= @@ -392,8 +456,11 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -430,6 +497,7 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= @@ -456,8 +524,11 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -473,6 +544,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -495,11 +567,15 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -525,6 +601,8 @@ golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -577,12 +655,17 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -687,6 +770,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/DataDog/dd-trace-go.v1 v1.67.0 h1:3Cb46zyKIlEWac21tvDF2O4KyMlOHQxrQkyiaUpdwM0= +gopkg.in/DataDog/dd-trace-go.v1 v1.67.0/go.mod h1:6DdiJPKOeJfZyd/IUGCAd5elY8qPGkztK6wbYYsMjag= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -714,6 +799,14 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod 
h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +modernc.org/libc v1.37.6 h1:orZH3c5wmhIQFTXF+Nt+eeauyd+ZIt2BX6ARe+kD+aw= +modernc.org/libc v1.37.6/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE= +modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E= +modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E= +modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ= +modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/pkg/config/remote/service/service.go b/pkg/config/remote/service/service.go index 61cc0e5d21e72..f09c59df87d88 100644 --- a/pkg/config/remote/service/service.go +++ b/pkg/config/remote/service/service.go @@ -17,6 +17,8 @@ import ( "errors" "expvar" "fmt" + "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" "net/url" "path" "strconv" @@ -65,6 +67,12 @@ const ( initialFetchErrorLog uint64 = 5 ) +const ( + // the minimum amount of time that must pass before a new cache + // bypass request is allowed for the CDN client + maxCDNUpdateFrequency = 50 * time.Second +) + var ( exportedMapStatus = expvar.NewMap("remoteConfigStatus") // Status expvar exported @@ -83,6 +91,75 @@ type Service struct { // via logs. 
rcType string + db *bbolt.DB +} + +func (s *Service) getNewDirectorRoots(uptane uptaneClient, currentVersion uint64, newVersion uint64) ([][]byte, error) { + var roots [][]byte + for i := currentVersion + 1; i <= newVersion; i++ { + root, err := uptane.DirectorRoot(i) + if err != nil { + return nil, err + } + canonicalRoot, err := enforceCanonicalJSON(root) + if err != nil { + return nil, err + } + roots = append(roots, canonicalRoot) + } + return roots, nil +} + +func (s *Service) getTargetFiles(uptane uptaneClient, products []rdata.Product, cachedTargetFiles []*pbgo.TargetFileMeta) ([]*pbgo.File, error) { + productSet := make(map[rdata.Product]struct{}) + for _, product := range products { + productSet[product] = struct{}{} + } + targets, err := uptane.Targets() + if err != nil { + return nil, err + } + cachedTargets := make(map[string]data.FileMeta) + for _, cachedTarget := range cachedTargetFiles { + hashes := make(data.Hashes) + for _, hash := range cachedTarget.Hashes { + h, err := hex.DecodeString(hash.Hash) + if err != nil { + return nil, err + } + hashes[hash.Algorithm] = h + } + cachedTargets[cachedTarget.Path] = data.FileMeta{ + Hashes: hashes, + Length: cachedTarget.Length, + } + } + var configFiles []*pbgo.File + for targetPath, targetMeta := range targets { + configPathMeta, err := rdata.ParseConfigPath(targetPath) + if err != nil { + return nil, err + } + if _, inClientProducts := productSet[rdata.Product(configPathMeta.Product)]; inClientProducts { + if notEqualErr := tufutil.FileMetaEqual(cachedTargets[targetPath], targetMeta.FileMeta); notEqualErr == nil { + continue + } + fileContents, err := uptane.TargetFile(targetPath) + if err != nil { + return nil, err + } + configFiles = append(configFiles, &pbgo.File{ + Path: targetPath, + Raw: fileContents, + }) + } + } + return configFiles, nil +} + +// CoreAgentService fetches Remote Configurations from the RC backend +type CoreAgentService struct { + Service firstUpdate bool defaultRefreshInterval time.Duration @@ -101,8 +178,7 @@ type Service struct { hostname string tagsGetter func() []string traceAgentEnv string - db *bbolt.DB - uptane uptaneClient + uptane coreAgentUptaneClient api api.API products map[rdata.Product]struct{} @@ -125,9 +201,8 @@ type Service struct { agentVersion string } -// uptaneClient is used to mock the uptane component for testing +// uptaneClient provides functions to get TUF/uptane repo data. type uptaneClient interface { - Update(response *pbgo.LatestConfigsResponse) error State() (uptane.State, error) DirectorRoot(version uint64) ([]byte, error) StoredOrgUUID() (string, error) @@ -138,6 +213,18 @@ type uptaneClient interface { TUFVersionState() (uptane.TUFVersions, error) } +// coreAgentUptaneClient provides functions to get TUF/uptane repo data and update the agent's state via the RC backend. +type coreAgentUptaneClient interface { + uptaneClient + Update(response *pbgo.LatestConfigsResponse) error +} + +// cdnUptaneClient provides functions to get TUF/uptane repo data and update the agent's state via the CDN. 
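The read-only uptaneClient interface is shared because the two concrete clients only differ in how they refresh themselves: the core-agent client is pushed a backend response, while the CDN client pulls over HTTP(S). A self-contained sketch of the same composition pattern, with illustrative names (repoReader, coreAgentRepo, cdnRepo) standing in for the real interfaces:

```go
package example

import "context"

// repoReader is a stand-in for the shared read-only uptaneClient interface;
// the real interface exposes several more read methods.
type repoReader interface {
	TargetFile(path string) ([]byte, error)
}

// coreAgentRepo mirrors coreAgentUptaneClient: it is updated by pushing a
// backend response into it.
type coreAgentRepo interface {
	repoReader
	Update(response []byte) error
}

// cdnRepo mirrors cdnUptaneClient: it updates itself by fetching over HTTP(S).
type cdnRepo interface {
	repoReader
	Update(ctx context.Context) error
}

// Shared helpers only depend on the read-only view, so either variant can be
// passed in explicitly, which is why getNewDirectorRoots and getTargetFiles
// now take the client as an argument.
func readConfig(r repoReader, path string) ([]byte, error) {
	return r.TargetFile(path)
}
```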
+type cdnUptaneClient interface { + uptaneClient + Update(ctx context.Context) error +} + // RcTelemetryReporter should be implemented by the agent to publish metrics on exceptional cache bypass request events type RcTelemetryReporter interface { // IncRateLimit is invoked when a cache bypass request is prevented due to rate limiting @@ -286,7 +373,7 @@ func WithClientTTL(interval time.Duration, cfgPath string) func(s *options) { } // NewService instantiates a new remote configuration management service -func NewService(cfg model.Reader, rcType, baseRawURL, hostname string, tagsGetter func() []string, telemetryReporter RcTelemetryReporter, agentVersion string, opts ...Option) (*Service, error) { +func NewService(cfg model.Reader, rcType, baseRawURL, hostname string, tagsGetter func() []string, telemetryReporter RcTelemetryReporter, agentVersion string, opts ...Option) (*CoreAgentService, error) { options := defaultOptions for _, opt := range opts { opt(&options) @@ -337,7 +424,7 @@ func NewService(cfg model.Reader, rcType, baseRawURL, hostname string, tagsGette if authKeys.rcKeySet { opt = append(opt, uptane.WithOrgIDCheck(authKeys.rcKey.OrgID)) } - uptaneClient, err := uptane.NewClient( + uptaneClient, err := uptane.NewCoreAgentClient( db, newRCBackendOrgUUIDProvider(http), opt..., @@ -349,8 +436,11 @@ func NewService(cfg model.Reader, rcType, baseRawURL, hostname string, tagsGette clock := clock.New() - return &Service{ - rcType: rcType, + return &CoreAgentService{ + Service: Service{ + rcType: rcType, + db: db, + }, firstUpdate: true, defaultRefreshInterval: options.refresh, refreshIntervalOverrideAllowed: options.refreshIntervalOverrideAllowed, @@ -362,7 +452,6 @@ func NewService(cfg model.Reader, rcType, baseRawURL, hostname string, tagsGette tagsGetter: tagsGetter, clock: clock, traceAgentEnv: options.traceAgentEnv, - db: db, api: http, uptane: uptaneClient, clients: newClients(clock, options.clientTTL), @@ -393,7 +482,7 @@ func newRCBackendOrgUUIDProvider(http api.API) uptane.OrgUUIDProvider { } // Start the remote configuration management service -func (s *Service) Start() { +func (s *CoreAgentService) Start() { go func() { s.pollOrgStatus() for { @@ -452,7 +541,7 @@ func (s *Service) Start() { } // Stop stops the refresh loop and closes the on-disk DB cache -func (s *Service) Stop() error { +func (s *CoreAgentService) Stop() error { if s.stopConfigPoller != nil { close(s.stopConfigPoller) } @@ -460,7 +549,7 @@ func (s *Service) Stop() error { return s.db.Close() } -func (s *Service) pollOrgStatus() { +func (s *CoreAgentService) pollOrgStatus() { response, err := s.api.FetchOrgStatus(context.Background()) if err != nil { // Unauthorized and proxy error are caught by the main loop requesting the latest config, @@ -498,13 +587,13 @@ func (s *Service) pollOrgStatus() { exportedStatusKeyAuthorized.Set(strconv.FormatBool(response.Authorized)) } -func (s *Service) calculateRefreshInterval() time.Duration { +func (s *CoreAgentService) calculateRefreshInterval() time.Duration { backoffTime := s.backoffPolicy.GetBackoffDuration(s.backoffErrorCount) return s.defaultRefreshInterval + backoffTime } -func (s *Service) refresh() error { +func (s *CoreAgentService) refresh() error { s.Lock() activeClients := s.clients.activeClients() s.refreshProducts(activeClients) @@ -585,11 +674,11 @@ func (s *Service) refresh() error { return nil } -func (s *Service) forceRefresh() bool { +func (s *CoreAgentService) forceRefresh() bool { return s.firstUpdate } -func (s *Service) refreshProducts(activeClients 
[]*pbgo.Client) { +func (s *CoreAgentService) refreshProducts(activeClients []*pbgo.Client) { for _, client := range activeClients { for _, product := range client.Products { if _, hasProduct := s.products[rdata.Product(product)]; !hasProduct { @@ -599,7 +688,7 @@ func (s *Service) refreshProducts(activeClients []*pbgo.Client) { } } -func (s *Service) getClientState() ([]byte, error) { +func (s *CoreAgentService) getClientState() ([]byte, error) { rawTargetsCustom, err := s.uptane.TargetsCustom() if err != nil { return nil, err @@ -611,7 +700,7 @@ func (s *Service) getClientState() ([]byte, error) { return custom.OpaqueBackendState, nil } -func (s *Service) getRefreshInterval() (time.Duration, error) { +func (s *CoreAgentService) getRefreshInterval() (time.Duration, error) { rawTargetsCustom, err := s.uptane.TargetsCustom() if err != nil { return 0, err @@ -633,7 +722,7 @@ func (s *Service) getRefreshInterval() (time.Duration, error) { // ClientGetConfigs is the polling API called by tracers and agents to get the latest configurations // //nolint:revive // TODO(RC) Fix revive linter -func (s *Service) ClientGetConfigs(_ context.Context, request *pbgo.ClientGetConfigsRequest) (*pbgo.ClientGetConfigsResponse, error) { +func (s *CoreAgentService) ClientGetConfigs(_ context.Context, request *pbgo.ClientGetConfigsRequest) (*pbgo.ClientGetConfigsResponse, error) { s.Lock() defer s.Unlock() err := validateRequest(request) @@ -682,7 +771,7 @@ func (s *Service) ClientGetConfigs(_ context.Context, request *pbgo.ClientGetCon if tufVersions.DirectorTargets == request.Client.State.TargetsVersion { return &pbgo.ClientGetConfigsResponse{}, nil } - roots, err := s.getNewDirectorRoots(request.Client.State.RootVersion, tufVersions.DirectorRoot) + roots, err := s.getNewDirectorRoots(s.uptane, request.Client.State.RootVersion, tufVersions.DirectorRoot) if err != nil { return nil, err } @@ -690,7 +779,7 @@ func (s *Service) ClientGetConfigs(_ context.Context, request *pbgo.ClientGetCon if err != nil { return nil, err } - targetFiles, err := s.getTargetFiles(rdata.StringListToProduct(request.Client.Products), request.CachedTargetFiles) + targetFiles, err := s.getTargetFiles(s.uptane, rdata.StringListToProduct(request.Client.Products), request.CachedTargetFiles) if err != nil { return nil, err } @@ -730,7 +819,7 @@ func (s *Service) ClientGetConfigs(_ context.Context, request *pbgo.ClientGetCon } // ConfigGetState returns the state of the configuration and the director repos in the local store -func (s *Service) ConfigGetState() (*pbgo.GetStateConfigResponse, error) { +func (s *CoreAgentService) ConfigGetState() (*pbgo.GetStateConfigResponse, error) { state, err := s.uptane.State() if err != nil { return nil, err @@ -758,69 +847,6 @@ func (s *Service) ConfigGetState() (*pbgo.GetStateConfigResponse, error) { return response, nil } -func (s *Service) getNewDirectorRoots(currentVersion uint64, newVersion uint64) ([][]byte, error) { - var roots [][]byte - for i := currentVersion + 1; i <= newVersion; i++ { - root, err := s.uptane.DirectorRoot(i) - if err != nil { - return nil, err - } - canonicalRoot, err := enforceCanonicalJSON(root) - if err != nil { - return nil, err - } - roots = append(roots, canonicalRoot) - } - return roots, nil -} - -func (s *Service) getTargetFiles(products []rdata.Product, cachedTargetFiles []*pbgo.TargetFileMeta) ([]*pbgo.File, error) { - productSet := make(map[rdata.Product]struct{}) - for _, product := range products { - productSet[product] = struct{}{} - } - targets, err := 
s.uptane.Targets() - if err != nil { - return nil, err - } - cachedTargets := make(map[string]data.FileMeta) - for _, cachedTarget := range cachedTargetFiles { - hashes := make(data.Hashes) - for _, hash := range cachedTarget.Hashes { - h, err := hex.DecodeString(hash.Hash) - if err != nil { - return nil, err - } - hashes[hash.Algorithm] = h - } - cachedTargets[cachedTarget.Path] = data.FileMeta{ - Hashes: hashes, - Length: cachedTarget.Length, - } - } - var configFiles []*pbgo.File - for targetPath, targetMeta := range targets { - configPathMeta, err := rdata.ParseConfigPath(targetPath) - if err != nil { - return nil, err - } - if _, inClientProducts := productSet[rdata.Product(configPathMeta.Product)]; inClientProducts { - if notEqualErr := tufutil.FileMetaEqual(cachedTargets[targetPath], targetMeta.FileMeta); notEqualErr == nil { - continue - } - fileContents, err := s.uptane.TargetFile(targetPath) - if err != nil { - return nil, err - } - configFiles = append(configFiles, &pbgo.File{ - Path: targetPath, - Raw: fileContents, - }) - } - } - return configFiles, nil -} - func validateRequest(request *pbgo.ClientGetConfigsRequest) error { if request.Client == nil { return status.Error(codes.InvalidArgument, "client is a required field for client config update requests") @@ -925,3 +951,179 @@ func enforceCanonicalJSON(raw []byte) ([]byte, error) { return canonical, nil } + +// HTTPClient fetches Remote Configurations from an HTTP(s)-based backend +type HTTPClient struct { + Service + lastUpdate time.Time + uptane cdnUptaneClient +} + +// NewHTTPClient creates a new HTTPClient that can be used to fetch Remote Configurations from an HTTP(s)-based backend +// It uses a local db to cache the fetched configurations. Only one HTTPClient should be created per agent. +// An HTTPClient must be closed via HTTPClient.Close() before creating a new one. +func NewHTTPClient(runPath, site, apiKey, agentVersion string) (*HTTPClient, error) { + dbPath := path.Join(runPath, "remote-config-cdn.db") + db, err := openCacheDB(dbPath, agentVersion, apiKey) + if err != nil { + return nil, err + } + + uptaneCDNClient, err := uptane.NewCDNClient(db, site, apiKey) + if err != nil { + return nil, err + } + + return &HTTPClient{ + Service: Service{ + rcType: "CDN", + db: db, + }, + uptane: uptaneCDNClient, + }, nil +} + +// Close closes the HTTPClient and cleans up any resources. Close must be called +// before any other HTTPClients are instantiated via NewHTTPClient +func (c *HTTPClient) Close() error { + return c.db.Close() +} + +// GetCDNConfigUpdate returns any updated configs. If multiple requests have been made +// in a short amount of time, a cached response is returned. If RC has been disabled, +// an error is returned. If there is no update (the targets version is up-to-date) nil +// is returned for both the update and error. 
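A hedged sketch of how a caller might drive the CDN-backed client described above. The run path, site, API key, agent version, and product name are placeholders; calls arriving within the 50-second maxCDNUpdateFrequency window are answered from the locally cached repository instead of triggering a new CDN fetch:

```go
package example

import (
	"context"
	"log"

	"github.com/DataDog/datadog-agent/pkg/config/remote/service"
)

// fetchFromCDN is an illustrative caller; all literal arguments are placeholders.
func fetchFromCDN(ctx context.Context) error {
	client, err := service.NewHTTPClient("/tmp/rc-example", "datadoghq.com", "REDACTED_API_KEY", "7.60.0")
	if err != nil {
		return err
	}
	defer client.Close()

	update, err := client.GetCDNConfigUpdate(
		ctx,
		[]string{"AGENT_CONFIG"}, // products requested by this caller
		0,                        // targets version currently held by the caller
		1,                        // director root version currently held by the caller
		nil,                      // no cached target files yet
	)
	if err != nil {
		return err
	}
	if update == nil {
		// nil update and nil error means the caller is already up to date.
		log.Println("remote config already up to date")
		return nil
	}
	log.Printf("received %d client configs", len(update.ClientConfigs))
	return nil
}
```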
+func (c *HTTPClient) GetCDNConfigUpdate( + ctx context.Context, + products []string, + currentTargetsVersion, currentRootVersion uint64, + cachedTargetFiles []*pbgo.TargetFileMeta, +) (*state.Update, error) { + var err error + span, ctx := tracer.StartSpanFromContext(ctx, "HTTPClient.GetCDNConfigUpdate") + defer span.Finish(tracer.WithError(err)) + if !c.shouldUpdate() { + span.SetTag("use_cache", true) + return c.getUpdate(ctx, products, currentTargetsVersion, currentRootVersion, cachedTargetFiles) + } + + err = c.update(ctx) + if err != nil { + span.SetTag("cache_update_error", true) + _ = log.Warn(fmt.Sprintf("Error updating CDN config repo: %v", err)) + } + + u, err := c.getUpdate(ctx, products, currentTargetsVersion, currentRootVersion, cachedTargetFiles) + return u, err +} + +func (c *HTTPClient) update(ctx context.Context) error { + var err error + span, ctx := tracer.StartSpanFromContext(ctx, "HTTPClient.update") + defer span.Finish(tracer.WithError(err)) + c.Lock() + defer c.Unlock() + + err = c.uptane.Update(ctx) + if err != nil { + return err + } + + return nil +} + +func (c *HTTPClient) shouldUpdate() bool { + c.Lock() + defer c.Unlock() + if time.Since(c.lastUpdate) > maxCDNUpdateFrequency { + c.lastUpdate = time.Now() + return true + } + return false +} + +func (c *HTTPClient) getUpdate( + ctx context.Context, + products []string, + currentTargetsVersion, currentRootVersion uint64, + cachedTargetFiles []*pbgo.TargetFileMeta, +) (*state.Update, error) { + c.Lock() + defer c.Unlock() + span, _ := tracer.StartSpanFromContext(ctx, "HTTPClient.getUpdate") + defer span.Finish() + span.SetTag("products", products) + span.SetTag("current_targets_version", currentTargetsVersion) + span.SetTag("current_root_version", currentRootVersion) + span.SetTag("cached_target_files", cachedTargetFiles) + + tufVersions, err := c.uptane.TUFVersionState() + if err != nil { + return nil, err + } + if tufVersions.DirectorTargets == currentTargetsVersion { + return nil, nil + } + roots, err := c.getNewDirectorRoots(c.uptane, currentRootVersion, tufVersions.DirectorRoot) + if err != nil { + return nil, err + } + targetsRaw, err := c.uptane.TargetsMeta() + if err != nil { + return nil, err + } + targetFiles, err := c.getTargetFiles(c.uptane, rdata.StringListToProduct(products), cachedTargetFiles) + if err != nil { + return nil, err + } + + canonicalTargets, err := enforceCanonicalJSON(targetsRaw) + if err != nil { + return nil, err + } + + directorTargets, err := c.uptane.Targets() + if err != nil { + return nil, err + } + + productsMap := make(map[string]struct{}) + for _, product := range products { + productsMap[product] = struct{}{} + } + configs := make([]string, 0) + expiredConfigs := make([]string, 0) + for path, meta := range directorTargets { + pathMeta, err := rdata.ParseConfigPath(path) + if err != nil { + return nil, err + } + if _, productRequested := productsMap[pathMeta.Product]; !productRequested { + continue + } + configMetadata, err := parseFileMetaCustom(meta.Custom) + if err != nil { + return nil, err + } + if configExpired(configMetadata.Expires) { + expiredConfigs = append(expiredConfigs, path) + continue + } + + configs = append(configs, path) + } + + fileMap := make(map[string][]byte, len(targetFiles)) + for _, f := range targetFiles { + fileMap[f.Path] = f.Raw + } + + span.SetTag("configs.returned", configs) + span.SetTag("configs.expired", expiredConfigs) + return &state.Update{ + TUFRoots: roots, + TUFTargets: canonicalTargets, + TargetFiles: fileMap, + ClientConfigs: configs, 
+ }, nil +} diff --git a/pkg/config/remote/service/service_test.go b/pkg/config/remote/service/service_test.go index 683713d49a747..860b6546fcbe0 100644 --- a/pkg/config/remote/service/service_test.go +++ b/pkg/config/remote/service/service_test.go @@ -42,6 +42,11 @@ const ( testEnv = "test-env" ) +const ( + site = "test-site" + k = "test-api-key" +) + // Setup overrides for tests func init() { uuid.GetUUID = func() string { @@ -69,11 +74,24 @@ type mockUptane struct { mock.Mock } -func (m *mockUptane) Update(response *pbgo.LatestConfigsResponse) error { +type mockCoreAgentUptane struct { + mockUptane +} + +type mockCDNUptane struct { + mockUptane +} + +func (m *mockCoreAgentUptane) Update(response *pbgo.LatestConfigsResponse) error { args := m.Called(response) return args.Error(0) } +func (m *mockCDNUptane) Update(ctx context.Context) error { + args := m.Called(ctx) + return args.Error(0) +} + func (m *mockUptane) State() (uptane.State, error) { args := m.Called() return args.Get(0).(uptane.State), args.Error(1) @@ -139,7 +157,7 @@ var testRCKey = msgpgo.RemoteConfigKey{ Datacenter: "dd.com", } -func newTestService(t *testing.T, api *mockAPI, uptane *mockUptane, clock clock.Clock) *Service { +func newTestService(t *testing.T, api *mockAPI, uptane *mockCoreAgentUptane, clock clock.Clock) *CoreAgentService { cfg := model.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) cfg.SetWithoutSource("hostname", "test-hostname") @@ -167,7 +185,7 @@ func newTestService(t *testing.T, api *mockAPI, uptane *mockUptane, clock clock. func TestServiceBackoffFailure(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, clock) @@ -251,7 +269,7 @@ func TestServiceBackoffFailure(t *testing.T) { func TestServiceBackoffFailureRecovery(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, clock) @@ -319,7 +337,7 @@ func customMeta(tracerPredicates []*pbgo.TracerPredicateV1, expiration int64) *j // gRPC's InvalidArgument status code. 
func TestClientGetConfigsRequestMissingFields(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, clock) @@ -384,7 +402,7 @@ func TestClientGetConfigsRequestMissingFields(t *testing.T) { func TestService(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, clock) @@ -415,7 +433,7 @@ func TestService(t *testing.T) { api.AssertExpectations(t) uptaneClient.AssertExpectations(t) - *uptaneClient = mockUptane{} + *uptaneClient = mockCoreAgentUptane{} *api = mockAPI{} root3 := []byte(`{"signatures": "testroot3", "signed": "signed"}`) @@ -530,7 +548,7 @@ func TestServiceClientPredicates(t *testing.T) { lastConfigResponse := &pbgo.LatestConfigsResponse{ TargetFiles: []*pbgo.File{{Path: "test"}}, } - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} api := &mockAPI{} service := newTestService(t, api, uptaneClient, clock) @@ -624,7 +642,7 @@ func TestServiceClientPredicates(t *testing.T) { func TestServiceGetRefreshIntervalNone(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, clock) @@ -664,7 +682,7 @@ func TestServiceGetRefreshIntervalNone(t *testing.T) { func TestServiceGetRefreshIntervalValid(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, clock) @@ -704,7 +722,7 @@ func TestServiceGetRefreshIntervalValid(t *testing.T) { func TestServiceGetRefreshIntervalTooSmall(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, clock) @@ -744,7 +762,7 @@ func TestServiceGetRefreshIntervalTooSmall(t *testing.T) { func TestServiceGetRefreshIntervalTooBig(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, clock) @@ -784,7 +802,7 @@ func TestServiceGetRefreshIntervalTooBig(t *testing.T) { func TestServiceGetRefreshIntervalNoOverrideAllowed(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, clock) @@ -836,7 +854,7 @@ func TestConfigExpiration(t *testing.T) { lastConfigResponse := &pbgo.LatestConfigsResponse{ TargetFiles: []*pbgo.File{{Path: "test"}}, } - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} api := &mockAPI{} service := newTestService(t, api, uptaneClient, clock) @@ -914,7 +932,7 @@ func TestConfigExpiration(t *testing.T) { func TestOrgStatus(t *testing.T) { api := &mockAPI{} clock := clock.NewMock() - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} service := newTestService(t, api, uptaneClient, clock) response := &pbgo.OrgStatusResponse{ @@ -1146,3 +1164,103 @@ func TestWithClientTTL(t *testing.T) { func getHostTags() []string { return []string{"dogo_state:hungry"} } + +func setupCDNClient(t *testing.T, uptaneClient *mockCDNUptane) *HTTPClient { + client, err := NewHTTPClient(t.TempDir(), site, k, "9.9.9") + require.NoError(t, err) + if uptaneClient != 
nil { + client.uptane = uptaneClient + } + return client +} + +// TestHTTPClientRecentUpdate tests that with a recent (<50s ago) last-update-time, +// the client will not fetch a new update and will return the cached state +func TestHTTPClientRecentUpdate(t *testing.T) { + uptaneClient := &mockCDNUptane{} + uptaneClient.On("TUFVersionState").Return(uptane.TUFVersions{ + DirectorRoot: 1, + DirectorTargets: 1, + ConfigRoot: 1, + ConfigSnapshot: 1, + }, nil) + uptaneClient.On("DirectorRoot", uint64(1)).Return([]byte(`{"signatures": "testroot1", "signed": "one"}`), nil) + uptaneClient.On("TargetsMeta").Return([]byte(`{"signatures": "testtargets", "signed": "stuff"}`), nil) + uptaneClient.On("Targets").Return( + data.TargetFiles{ + "datadog/2/TESTING1/id/1": {}, + "datadog/2/TESTING2/id/2": {}, + }, + nil, + ) + uptaneClient.On("TargetFile", "datadog/2/TESTING1/id/1").Return([]byte(`testing_1`), nil) + + client := setupCDNClient(t, uptaneClient) + defer client.Close() + client.lastUpdate = time.Now() + + u, err := client.GetCDNConfigUpdate(context.TODO(), []string{"TESTING1"}, 0, 0, []*pbgo.TargetFileMeta{}) + require.NoError(t, err) + uptaneClient.AssertExpectations(t) + require.NotNil(t, u) + require.Len(t, u.TargetFiles, 1) + require.Equal(t, []byte(`testing_1`), u.TargetFiles["datadog/2/TESTING1/id/1"]) + require.Len(t, u.ClientConfigs, 1) + require.Equal(t, "datadog/2/TESTING1/id/1", u.ClientConfigs[0]) + require.Len(t, u.TUFRoots, 1) + require.Equal(t, []byte(`{"signatures":"testroot1","signed":"one"}`), u.TUFRoots[0]) +} + +// TestHTTPClientUpdateSuccess tests that a stale state will trigger an update of the cached state +// before returning the cached state. In the event that the Update fails, the stale state will be returned. +func TestHTTPClientUpdateSuccess(t *testing.T) { + var tests = []struct { + updateSucceeds bool + }{ + {true}, + {false}, + } + + for _, tt := range tests { + t.Run(fmt.Sprintf("updateSucceeds=%t", tt.updateSucceeds), func(t *testing.T) { + uptaneClient := &mockCDNUptane{} + uptaneClient.On("TUFVersionState").Return(uptane.TUFVersions{ + DirectorRoot: 1, + DirectorTargets: 1, + ConfigRoot: 1, + ConfigSnapshot: 1, + }, nil) + uptaneClient.On("DirectorRoot", uint64(1)).Return([]byte(`{"signatures": "testroot1", "signed": "one"}`), nil) + uptaneClient.On("TargetsMeta").Return([]byte(`{"signatures": "testtargets", "signed": "stuff"}`), nil) + uptaneClient.On("Targets").Return( + data.TargetFiles{ + "datadog/2/TESTING1/id/1": {}, + "datadog/2/TESTING2/id/2": {}, + }, + nil, + ) + uptaneClient.On("TargetFile", "datadog/2/TESTING1/id/1").Return([]byte(`testing_1`), nil) + + updateErr := fmt.Errorf("uh oh") + if tt.updateSucceeds { + updateErr = nil + } + uptaneClient.On("Update", mock.Anything).Return(updateErr) + + client := setupCDNClient(t, uptaneClient) + defer client.Close() + client.lastUpdate = time.Now().Add(time.Second * -60) + + u, err := client.GetCDNConfigUpdate(context.TODO(), []string{"TESTING1"}, 0, 0, []*pbgo.TargetFileMeta{}) + require.NoError(t, err) + uptaneClient.AssertExpectations(t) + require.NotNil(t, u) + require.Len(t, u.TargetFiles, 1) + require.Equal(t, []byte(`testing_1`), u.TargetFiles["datadog/2/TESTING1/id/1"]) + require.Len(t, u.ClientConfigs, 1) + require.Equal(t, "datadog/2/TESTING1/id/1", u.ClientConfigs[0]) + require.Len(t, u.TUFRoots, 1) + require.Equal(t, []byte(`{"signatures":"testroot1","signed":"one"}`), u.TUFRoots[0]) + }) + } +} diff --git a/pkg/config/remote/uptane/client.go b/pkg/config/remote/uptane/client.go index 
c0ace9b0321e3..f56e86fba0b8f 100644 --- a/pkg/config/remote/uptane/client.go +++ b/pkg/config/remote/uptane/client.go @@ -9,14 +9,17 @@ package uptane import ( "bytes" + "context" "fmt" + "github.com/pkg/errors" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "net/http" "strings" "sync" "time" "github.com/DataDog/go-tuf/client" "github.com/DataDog/go-tuf/data" - "github.com/pkg/errors" "go.etcd.io/bbolt" rdata "github.com/DataDog/datadog-agent/pkg/config/remote/data" @@ -31,24 +34,38 @@ type Client struct { orgID int64 orgUUIDProvider OrgUUIDProvider - configLocalStore *localStore - configRemoteStore *remoteStoreConfig - configTUFClient *client.Client + configLocalStore *localStore + configTUFClient *client.Client + configRootOverride string + directorLocalStore *localStore + directorTUFClient *client.Client - directorLocalStore *localStore - directorRemoteStore *remoteStoreDirector - directorTUFClient *client.Client directorRootOverride string - - targetStore *targetStore - orgStore *orgStore + targetStore *targetStore + orgStore *orgStore cachedVerify bool cachedVerifyTime time.Time // TUF transaction tracker transactionalStore *transactionalStore + + orgVerificationEnabled bool +} + +// CoreAgentClient is an uptane client that fetches the latest configs from the Core Agent +type CoreAgentClient struct { + *Client + configRemoteStore *remoteStoreConfig + directorRemoteStore *remoteStoreDirector +} + +// CDNClient is an uptane client that fetches the latest configs from the server over HTTP(s) +type CDNClient struct { + *Client + directorRemoteStore *cdnRemoteDirectorStore + configRemoteStore *cdnRemoteConfigStore } // ClientOption describes a function in charge of changing the uptane client @@ -80,23 +97,26 @@ func WithConfigRootOverride(site string, configRootOverride string) ClientOption // OrgUUIDProvider is a provider of the agent org UUID type OrgUUIDProvider func() (string, error) -// NewClient creates a new uptane client -func NewClient(cacheDB *bbolt.DB, orgUUIDProvider OrgUUIDProvider, options ...ClientOption) (c *Client, err error) { +// NewCoreAgentClient creates a new uptane client +func NewCoreAgentClient(cacheDB *bbolt.DB, orgUUIDProvider OrgUUIDProvider, options ...ClientOption) (c *CoreAgentClient, err error) { transactionalStore := newTransactionalStore(cacheDB) targetStore := newTargetStore(transactionalStore) orgStore := newOrgStore(transactionalStore) - c = &Client{ + c = &CoreAgentClient{ configRemoteStore: newRemoteStoreConfig(targetStore), directorRemoteStore: newRemoteStoreDirector(targetStore), - targetStore: targetStore, - orgStore: orgStore, - transactionalStore: transactionalStore, - orgUUIDProvider: orgUUIDProvider, + Client: &Client{ + orgStore: orgStore, + orgUUIDProvider: orgUUIDProvider, + targetStore: targetStore, + transactionalStore: transactionalStore, + orgVerificationEnabled: true, + }, } for _, o := range options { - o(c) + o(c.Client) } if c.configLocalStore, err = newLocalStoreConfig(transactionalStore, c.site, c.configRootOverride); err != nil { @@ -113,7 +133,7 @@ func NewClient(cacheDB *bbolt.DB, orgUUIDProvider OrgUUIDProvider, options ...Cl } // Update updates the uptane client and rollbacks in case of error -func (c *Client) Update(response *pbgo.LatestConfigsResponse) error { +func (c *CoreAgentClient) Update(response *pbgo.LatestConfigsResponse) error { c.Lock() defer c.Unlock() c.cachedVerify = false @@ -134,7 +154,7 @@ func (c *Client) Update(response *pbgo.LatestConfigsResponse) error { } // update updates the uptane client -func 
(c *Client) update(response *pbgo.LatestConfigsResponse) error { +func (c *CoreAgentClient) update(response *pbgo.LatestConfigsResponse) error { err := c.updateRepos(response) if err != nil { return err @@ -146,6 +166,121 @@ func (c *Client) update(response *pbgo.LatestConfigsResponse) error { return c.verify() } +func (c *CoreAgentClient) updateRepos(response *pbgo.LatestConfigsResponse) error { + err := c.targetStore.storeTargetFiles(response.TargetFiles) + if err != nil { + return err + } + c.directorRemoteStore.update(response) + c.configRemoteStore.update(response) + _, err = c.directorTUFClient.Update() + if err != nil { + return errors.Wrap(err, "failed updating director repository") + } + _, err = c.configTUFClient.Update() + if err != nil { + e := fmt.Sprintf("could not update config repository [%s]", configMetasUpdateSummary(response.ConfigMetas)) + return errors.Wrap(err, e) + } + return nil +} + +// NewCDNClient creates a new uptane client that will fetch the latest configs from the server over HTTP(s) +func NewCDNClient(cacheDB *bbolt.DB, site, apiKey string, options ...ClientOption) (c *CDNClient, err error) { + transactionalStore := newTransactionalStore(cacheDB) + targetStore := newTargetStore(transactionalStore) + orgStore := newOrgStore(transactionalStore) + + httpClient := &http.Client{} + + c = &CDNClient{ + configRemoteStore: newCDNRemoteConfigStore(httpClient, site, apiKey), + directorRemoteStore: newCDNRemoteDirectorStore(httpClient, site, apiKey), + Client: &Client{ + site: site, + targetStore: targetStore, + transactionalStore: transactionalStore, + orgStore: orgStore, + orgVerificationEnabled: false, + orgUUIDProvider: func() (string, error) { + return "", nil + }, + }, + } + for _, o := range options { + o(c.Client) + } + + if c.configLocalStore, err = newLocalStoreConfig(transactionalStore, site, c.configRootOverride); err != nil { + return nil, err + } + + if c.directorLocalStore, err = newLocalStoreDirector(transactionalStore, site, c.directorRootOverride); err != nil { + return nil, err + } + + c.configTUFClient = client.NewClient(c.configLocalStore, c.configRemoteStore) + c.directorTUFClient = client.NewClient(c.directorLocalStore, c.directorRemoteStore) + return c, nil +} + +// Update updates the uptane client and rollbacks in case of error +func (c *CDNClient) Update(ctx context.Context) error { + var err error + span, ctx := tracer.StartSpanFromContext(ctx, "CDNClient.Update") + defer span.Finish(tracer.WithError(err)) + c.Lock() + defer c.Unlock() + c.cachedVerify = false + + // in case the commit is successful it is a no-op. + // the defer is present to be sure a transaction is never left behind. 
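+	// (if update() fails, the TUF clients are rebuilt below from the rolled-back local stores so partial state is not reused on the next call)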
+ defer c.transactionalStore.rollback() + + err = c.update(ctx) + if err != nil { + c.configTUFClient = client.NewClient(c.configLocalStore, c.configRemoteStore) + c.directorTUFClient = client.NewClient(c.directorLocalStore, c.directorRemoteStore) + return err + } + return c.transactionalStore.commit() +} + +// update updates the uptane client +func (c *CDNClient) update(ctx context.Context) error { + var err error + span, ctx := tracer.StartSpanFromContext(ctx, "CDNClient.update") + defer span.Finish(tracer.WithError(err)) + + err = c.updateRepos(ctx) + if err != nil { + return err + } + err = c.pruneTargetFiles() + if err != nil { + return err + } + return c.verify() +} + +func (c *CDNClient) updateRepos(ctx context.Context) error { + var err error + span, _ := tracer.StartSpanFromContext(ctx, "CDNClient.updateRepos") + defer span.Finish(tracer.WithError(err)) + + _, err = c.directorTUFClient.Update() + if err != nil { + err = errors.Wrap(err, "failed updating director repository") + return err + } + _, err = c.configTUFClient.Update() + if err != nil { + err = errors.Wrap(err, "could not update config repository") + return err + } + return nil +} + // TargetsCustom returns the current targets custom of this uptane client func (c *Client) TargetsCustom() ([]byte, error) { c.Lock() @@ -225,25 +360,6 @@ func (c *Client) TargetsMeta() ([]byte, error) { return targets, nil } -func (c *Client) updateRepos(response *pbgo.LatestConfigsResponse) error { - err := c.targetStore.storeTargetFiles(response.TargetFiles) - if err != nil { - return err - } - c.directorRemoteStore.update(response) - c.configRemoteStore.update(response) - _, err = c.directorTUFClient.Update() - if err != nil { - return errors.Wrap(err, "failed updating director repository") - } - _, err = c.configTUFClient.Update() - if err != nil { - e := fmt.Sprintf("could not update config repository [%s]", configMetasUpdateSummary(response.ConfigMetas)) - return errors.Wrap(err, e) - } - return nil -} - func (c *Client) pruneTargetFiles() error { targetFiles, err := c.directorTUFClient.Targets() if err != nil { @@ -302,6 +418,9 @@ func (c *Client) StoredOrgUUID() (string, error) { } func (c *Client) verifyOrg() error { + if !c.orgVerificationEnabled { + return nil + } rawCustom, err := c.configLocalStore.GetMetaCustom(metaSnapshot) if err != nil { return fmt.Errorf("could not obtain snapshot custom: %v", err) diff --git a/pkg/config/remote/uptane/client_test.go b/pkg/config/remote/uptane/client_test.go index 3f59d6c1f93f7..171eb605b985f 100644 --- a/pkg/config/remote/uptane/client_test.go +++ b/pkg/config/remote/uptane/client_test.go @@ -42,13 +42,13 @@ func newTestConfig(repo testRepositories) model.Config { return cfg } -func newTestClient(db *bbolt.DB, cfg model.Config) (*Client, error) { +func newTestClient(db *bbolt.DB, cfg model.Config) (*CoreAgentClient, error) { opts := []ClientOption{ WithOrgIDCheck(2), WithConfigRootOverride("datadoghq.com", cfg.GetString("remote_configuration.config_root")), WithDirectorRootOverride("datadoghq.com", cfg.GetString("remote_configuration.director_root")), } - return NewClient(db, getTestOrgUUIDProvider(2), opts...) + return NewCoreAgentClient(db, getTestOrgUUIDProvider(2), opts...) 
} func TestClientState(t *testing.T) { @@ -278,7 +278,7 @@ func TestClientVerifyOrgUUID(t *testing.T) { func TestOrgStore(t *testing.T) { db := getTestDB(t) - client, err := NewClient(db, getTestOrgUUIDProvider(2), WithOrgIDCheck(2)) + client, err := NewCoreAgentClient(db, getTestOrgUUIDProvider(2), WithOrgIDCheck(2)) assert.NoError(t, err) // Store key diff --git a/pkg/config/remote/uptane/remote_store.go b/pkg/config/remote/uptane/remote_store.go index 0286057ce1474..88c28666cb116 100644 --- a/pkg/config/remote/uptane/remote_store.go +++ b/pkg/config/remote/uptane/remote_store.go @@ -7,7 +7,11 @@ package uptane import ( "bytes" + "fmt" "io" + "net/http" + "path" + "strings" "github.com/DataDog/go-tuf/client" @@ -82,7 +86,7 @@ func (s *remoteStore) GetMeta(path string) (io.ReadCloser, int64, error) { return io.NopCloser(bytes.NewReader(requestedVersion)), int64(len(requestedVersion)), nil } -// GetMeta implements go-tuf's RemoteStore.GetTarget +// GetTarget implements go-tuf's RemoteStore.GetTarget // See https://pkg.go.dev/github.com/DataDog/go-tuf/client#RemoteStore func (s *remoteStore) GetTarget(targetPath string) (stream io.ReadCloser, size int64, err error) { target, found, err := s.targetStore.getTargetFile(targetPath) @@ -167,3 +171,142 @@ func (sc *remoteStoreConfig) update(update *pbgo.LatestConfigsResponse) { sc.metas[roleTargets][metas.TopTargets.Version] = metas.TopTargets.Raw } } + +// cdnRemoteStore implements go-tuf's RemoteStore +// It is an HTTP interface to an authenticated remote server that serves an uptane repository +// See https://pkg.go.dev/github.com/DataDog/go-tuf/client#RemoteStore +type cdnRemoteStore struct { + httpClient RequestDoer + host string + pathPrefix string + apiKey string + repositoryType string + + authnToken string +} + +// RequestDoer is an interface that abstracts the http.Client.Do method +type RequestDoer interface { + Do(req *http.Request) (*http.Response, error) +} + +type cdnRemoteConfigStore struct { + cdnRemoteStore +} + +type cdnRemoteDirectorStore struct { + cdnRemoteStore +} + +// getCDNHostnameFromSite returns the staging or production CDN hostname for a given site. +// Site can be any of the (non-fed) documented DD sites per https://docs.datadoghq.com/getting_started/site/ +func getCDNHostnameFromSite(site string) string { + s := strings.TrimPrefix(site, "https://") + switch s { + // staging: + case "datad0g.com": + return "remote-config.datad0g.com" + // prod: + case "ap1.datadoghq.com": + return "remote-config.datadoghq.com" + case "us5.datadoghq.com": + return "remote-config.datadoghq.com" + case "us3.datadoghq.com": + return "remote-config.datadoghq.com" + case "app.datadoghq.eu": + return "remote-config.datadoghq.com" + case "app.datadoghq.com": + return "remote-config.datadoghq.com" + } + return "remote-config.datadoghq.com" +} + +// Trims any schemas or non-datacenter related subdomains from the site to get the path prefix for the CDN +// e.g. https://us3.datadoghq.com -> us3.datadoghq.com +// e.g. 
https://app.datadoghq.com -> datadoghq.com +func getCDNPathPrefixFromSite(site string) string { + s := strings.TrimPrefix(site, "https://app.") + s = strings.TrimPrefix(s, "https://") + return s +} + +func newCDNRemoteConfigStore(client *http.Client, site, apiKey string) *cdnRemoteConfigStore { + return &cdnRemoteConfigStore{ + cdnRemoteStore: cdnRemoteStore{ + httpClient: client, + host: getCDNHostnameFromSite(site), + pathPrefix: getCDNPathPrefixFromSite(site), + apiKey: apiKey, + repositoryType: "config", + }, + } +} + +func newCDNRemoteDirectorStore(client *http.Client, site, apiKey string) *cdnRemoteDirectorStore { + return &cdnRemoteDirectorStore{ + cdnRemoteStore: cdnRemoteStore{ + httpClient: client, + host: getCDNHostnameFromSite(site), + pathPrefix: getCDNPathPrefixFromSite(site), + apiKey: apiKey, + repositoryType: "director", + }, + } +} + +// GetMeta implements go-tuf's RemoteStore.GetMeta +// See https://pkg.go.dev/github.com/DataDog/go-tuf/client#RemoteStore +func (s *cdnRemoteStore) GetMeta(p string) (io.ReadCloser, int64, error) { + return s.getRCFile(path.Join(s.repositoryType, p)) +} + +// GetTarget implements go-tuf's RemoteStore.GetTarget +// See https://pkg.go.dev/github.com/DataDog/go-tuf/client#RemoteStore +func (s *cdnRemoteStore) GetTarget(path string) (io.ReadCloser, int64, error) { + return s.getRCFile(path) +} + +func (s *cdnRemoteStore) newAuthenticatedHTTPReq(method, p string) (*http.Request, error) { + req, err := http.NewRequest(method, s.host, nil) + if err != nil { + return nil, err + } + + req.Header.Add("X-Dd-Api-Key", s.apiKey) + if s.authnToken != "" { + req.Header.Add("Authorization", s.authnToken) + } + + req.URL.Scheme = "https" + req.URL.Host = s.host + req.URL.Path = "/" + path.Join(s.pathPrefix, p) + req.Host = s.host + + return req, err +} + +func (s *cdnRemoteStore) updateAuthnToken(resp *http.Response) { + authToken := resp.Header.Get("X-Dd-Refreshed-Authorization") + if authToken != "" { + s.authnToken = authToken + } +} + +func (s *cdnRemoteStore) getRCFile(path string) (io.ReadCloser, int64, error) { + req, err := s.newAuthenticatedHTTPReq("GET", path) + if err != nil { + return nil, 0, err + } + resp, err := s.httpClient.Do(req) + if err != nil { + return nil, 0, err + } + if resp.StatusCode == http.StatusNotFound { + return nil, 0, client.ErrNotFound{File: path} + } + if resp.StatusCode != http.StatusOK { + return nil, 0, fmt.Errorf("unexpected status code %d", resp.StatusCode) + } + s.updateAuthnToken(resp) + return resp.Body, resp.ContentLength, nil +} diff --git a/pkg/config/remote/uptane/remote_store_test.go b/pkg/config/remote/uptane/remote_store_test.go index 0d4ad9295ac13..9d9d09e8cd15d 100644 --- a/pkg/config/remote/uptane/remote_store_test.go +++ b/pkg/config/remote/uptane/remote_store_test.go @@ -7,7 +7,13 @@ package uptane import ( "fmt" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "io" + "net/http" + "path" + "strconv" + "strings" "testing" "github.com/DataDog/go-tuf/client" @@ -16,6 +22,11 @@ import ( pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" ) +const ( + host = "test-host" + k = "test" +) + func generateUpdate(baseVersion uint64) *pbgo.LatestConfigsResponse { baseVersion *= 10000 return &pbgo.LatestConfigsResponse{ @@ -239,3 +250,254 @@ func assertGetTarget(t *testing.T, store *remoteStore, path string, expectedCont assert.NoError(t, err) assert.Equal(t, expectedContent, content) } + +type mockHTTPClient struct { + mock.Mock +} + +func (m *mockHTTPClient) Do(req 
*http.Request) (*http.Response, error) { + args := m.Called(req) + return args.Get(0).(*http.Response), args.Error(1) +} + +func getRequestMatcher(storeType, p, apiKey, token string) interface{} { + return mock.MatchedBy(func(arg interface{}) bool { + req := arg.(*http.Request) + return req.Method == "GET" && + req.URL.Scheme == "https" && + req.URL.Host == host && + req.URL.Path == "/"+path.Join("test-site", storeType, p) && + req.Host == host && + req.Header.Get("X-Dd-Api-Key") == apiKey && + req.Header.Get("Authorization") == token + }) +} + +// TestCDNRemoteStore tests that a series of GetMeta and GetTarget invocations will make the +// correct HTTP requests and handle authz tokens correctly +func TestCDNRemoteStore(t *testing.T) { + storeType := "director" + root2 := "path/to/2.root.json" + body2 := "body2" + length := len(body2) + + // First GetMeta request should pass the api key but no token, since the remote store is freshly initialized + apiKeyMatcher := getRequestMatcher(storeType, root2, k, "") + httpClient := &mockHTTPClient{} + + // Response with no authz token in the response headers + resp := &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(body2)), + ContentLength: int64(length), + } + httpClient.On("Do", apiKeyMatcher).Return(resp, nil) + + cdnStore := &cdnRemoteStore{ + httpClient: httpClient, + host: host, + pathPrefix: "test-site", + apiKey: k, + repositoryType: storeType, + } + + readCloser, contentLength, err := cdnStore.GetMeta(root2) + require.NoError(t, err) + require.NotNil(t, readCloser) + require.Equal(t, int64(length), contentLength) + content := make([]byte, length) + n, err := readCloser.Read(content) + require.NoError(t, err) + require.Equal(t, length, n) + require.Equal(t, body2, string(content)) + httpClient.AssertExpectations(t) + require.NoError(t, readCloser.Close()) + + root3 := "path/to/3.root.json" + body3 := "body3" + length = len(body3) + // For the second GetMeta request, we still expect to only pass the api key, since the first request's response did not contain a token + apiKeyMatcher = getRequestMatcher(storeType, root3, k, "") + + // Second response will include an authz token in the headers + token := "Bearer test-token" + resp = &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(body3)), + ContentLength: int64(length), + Header: http.Header{ + "X-Dd-Refreshed-Authorization": []string{token}, + }, + } + + httpClient.On("Do", apiKeyMatcher).Return(resp, nil) + readCloser, contentLength, err = cdnStore.GetMeta(root3) + require.NoError(t, err) + require.NotNil(t, readCloser) + require.Equal(t, int64(length), contentLength) + content = make([]byte, length) + n, err = readCloser.Read(content) + require.NoError(t, err) + require.Equal(t, length, n) + require.Equal(t, body3, string(content)) + httpClient.AssertExpectations(t) + require.NoError(t, readCloser.Close()) + + root4 := "path/to/4.root.json" + body4 := "body4" + resp = &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(body4)), + ContentLength: int64(length), + } + + // For the third and final GetMeta request, we still expect to pass both the api key and the authz token that was returned in the second response + apiKeyAndAuthzMatcher := getRequestMatcher(storeType, root4, k, token) + httpClient.On("Do", apiKeyAndAuthzMatcher).Return(resp, nil) + + readCloser, contentLength, err = cdnStore.GetMeta(root4) + require.NoError(t, err) + require.NotNil(t, readCloser) + require.Equal(t, 
int64(length), contentLength) + content = make([]byte, length) + n, err = readCloser.Read(content) + require.NoError(t, err) + require.Equal(t, length, n) + require.Equal(t, body4, string(content)) + httpClient.AssertExpectations(t) + require.NoError(t, readCloser.Close()) + + // Lastly, perform a GetTarget request to ensure that the authz token is passed along correctly, and + // the path is correctly constructed (does not have the repository type prefix) + target := "path/to/target/abc" + body := "targetBody" + length = len(body) + targetMatcher := getRequestMatcher("", target, k, token) + resp = &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(body)), + ContentLength: int64(length), + } + httpClient.On("Do", targetMatcher).Return(resp, nil) + readCloser, contentLength, err = cdnStore.GetTarget(target) + require.NoError(t, err) + require.NotNil(t, readCloser) + require.Equal(t, int64(length), contentLength) + content = make([]byte, length) + n, err = readCloser.Read(content) + require.NoError(t, err) + require.Equal(t, length, n) + require.Equal(t, body, string(content)) + httpClient.AssertExpectations(t) + require.NoError(t, readCloser.Close()) +} + +func TestGetMetaNotFound(t *testing.T) { + storeType := "director" + root2 := "path/to/2.root.json" + + apiKeyMatcher := getRequestMatcher(storeType, root2, k, "") + httpClient := &mockHTTPClient{} + + resp := &http.Response{ + StatusCode: http.StatusNotFound, + } + httpClient.On("Do", apiKeyMatcher).Return(resp, nil) + + cdnStore := &cdnRemoteStore{ + httpClient: httpClient, + host: host, + pathPrefix: "test-site", + apiKey: k, + repositoryType: storeType, + } + + readCloser, contentLength, err := cdnStore.GetMeta(root2) + require.Error(t, err) + require.ErrorIs(t, err, client.ErrNotFound{File: path.Join(storeType, root2)}) + require.Nil(t, readCloser) + require.Equal(t, int64(0), contentLength) + httpClient.AssertExpectations(t) +} + +func TestGetMetaError(t *testing.T) { + storeType := "director" + root2 := "path/to/2.root.json" + + apiKeyMatcher := getRequestMatcher(storeType, root2, k, "") + httpClient := &mockHTTPClient{} + + resp := &http.Response{ + StatusCode: http.StatusInternalServerError, + } + httpClient.On("Do", apiKeyMatcher).Return(resp, nil) + + cdnStore := &cdnRemoteStore{ + httpClient: httpClient, + host: host, + pathPrefix: "test-site", + apiKey: k, + repositoryType: storeType, + } + + readCloser, contentLength, err := cdnStore.GetMeta(root2) + require.Error(t, err) + require.Equal(t, err.Error(), "unexpected status code "+strconv.Itoa(http.StatusInternalServerError)) + require.Nil(t, readCloser) + require.Equal(t, int64(0), contentLength) + httpClient.AssertExpectations(t) +} + +func TestGetTargetNotFound(t *testing.T) { + targetFile := "path/to/target/abc" + + apiKeyMatcher := getRequestMatcher("", targetFile, k, "") + httpClient := &mockHTTPClient{} + + resp := &http.Response{ + StatusCode: http.StatusNotFound, + } + httpClient.On("Do", apiKeyMatcher).Return(resp, nil) + + cdnStore := &cdnRemoteStore{ + httpClient: httpClient, + host: host, + pathPrefix: "test-site", + apiKey: k, + repositoryType: "director", + } + + readCloser, contentLength, err := cdnStore.GetTarget(targetFile) + require.Error(t, err) + require.ErrorIs(t, err, client.ErrNotFound{File: targetFile}) + require.Nil(t, readCloser) + require.Equal(t, int64(0), contentLength) + httpClient.AssertExpectations(t) +} + +func TestGetTargetError(t *testing.T) { + targetFile := "path/to/target/abc" + + apiKeyMatcher := 
getRequestMatcher("", targetFile, k, "") + httpClient := &mockHTTPClient{} + + resp := &http.Response{ + StatusCode: http.StatusInternalServerError, + } + httpClient.On("Do", apiKeyMatcher).Return(resp, nil) + + cdnStore := &cdnRemoteStore{ + httpClient: httpClient, + host: host, + pathPrefix: "test-site", + apiKey: k, + repositoryType: "director", + } + + readCloser, contentLength, err := cdnStore.GetTarget(targetFile) + require.Error(t, err) + require.Equal(t, err.Error(), "unexpected status code "+strconv.Itoa(http.StatusInternalServerError)) + require.Nil(t, readCloser) + require.Equal(t, int64(0), contentLength) + httpClient.AssertExpectations(t) +} diff --git a/pkg/config/render_config.go b/pkg/config/render_config.go index 7d2412e8a9ac1..e56f0e86c578b 100644 --- a/pkg/config/render_config.go +++ b/pkg/config/render_config.go @@ -50,6 +50,7 @@ type context struct { SNMP bool SecurityModule bool SecurityAgent bool + SBOM bool // enables CSM Vulnerability Management NetworkModule bool // Sub-module of System Probe UniversalServiceMonitoringModule bool // Sub-module of System Probe DataStreamsModule bool // Sub-module of System Probe @@ -87,6 +88,7 @@ func mkContext(buildType string) context { Kubelet: true, KubeApiServer: true, // TODO: remove when phasing out from node-agent Compliance: true, + SBOM: true, SNMP: true, PrometheusScrape: true, OTLP: true, diff --git a/pkg/config/settings/runtime_profiling.go b/pkg/config/settings/runtime_profiling.go index 60d1c946bd7bc..108043204d203 100644 --- a/pkg/config/settings/runtime_profiling.go +++ b/pkg/config/settings/runtime_profiling.go @@ -11,7 +11,7 @@ import ( "github.com/fatih/color" "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // ProfilingOpts defines the options used for profiling @@ -24,7 +24,7 @@ type ProfilingOpts struct { // ExecWithRuntimeProfilingSettings runs the callback func with the given runtime profiling settings func ExecWithRuntimeProfilingSettings(callback func(), opts ProfilingOpts, settingsClient Client) error { - if err := util.SetAuthToken(config.Datadog()); err != nil { + if err := util.SetAuthToken(pkgconfigsetup.Datadog()); err != nil { return fmt.Errorf("unable to set up authentication token: %v", err) } diff --git a/pkg/config/settings/runtime_setting_profiling.go b/pkg/config/settings/runtime_setting_profiling.go index 4517e6deeac3e..6086640275352 100644 --- a/pkg/config/settings/runtime_setting_profiling.go +++ b/pkg/config/settings/runtime_setting_profiling.go @@ -10,8 +10,8 @@ import ( "strings" "github.com/DataDog/datadog-agent/comp/core/config" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/profiling" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -72,7 +72,7 @@ func (l *ProfilingRuntimeSetting) Set(config config.Component, v interface{}, so if profile { // populate site - s := pkgconfig.DefaultSite + s := pkgconfigsetup.DefaultSite if config.IsSet(l.ConfigPrefix + "site") { s = config.GetString(l.ConfigPrefix + "site") } diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go index 85df73f4c7eed..39676f667d57b 100644 --- a/pkg/config/setup/config.go +++ b/pkg/config/setup/config.go @@ -28,6 +28,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check/defaults" pkgconfigenv 
"github.com/DataDog/datadog-agent/pkg/config/env" pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + "github.com/DataDog/datadog-agent/pkg/config/nodetreemodel" + "github.com/DataDog/datadog-agent/pkg/config/teeconfig" "github.com/DataDog/datadog-agent/pkg/util/hostname/validate" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -78,8 +80,10 @@ const ( // DefaultCompressorKind is the default compressor. Options available are 'zlib' and 'zstd' DefaultCompressorKind = "zlib" - // DefaultZstdCompressionLevel should mirror the default compression level defined in https://github.com/DataDog/zstd/blob/1.x/zstd.go#L23 - DefaultZstdCompressionLevel = 5 + // DefaultZstdCompressionLevel is the default compression level for `zstd`. + // Compression level 1 provides the lowest compression ratio, but uses much less RSS especially + // in situations where we have a high value for `GOMAXPROCS`. + DefaultZstdCompressionLevel = 1 // DefaultLogsSenderBackoffFactor is the default logs sender backoff randomness factor DefaultLogsSenderBackoffFactor = 2.0 @@ -105,7 +109,10 @@ const ( DefaultMaxMessageSizeBytes = 256 * 1000 // DefaultNetworkPathTimeout defines the default timeout for a network path test - DefaultNetworkPathTimeout = 10000 + DefaultNetworkPathTimeout = 1000 + + // DefaultNetworkPathMaxTTL defines the default maximum TTL for traceroute tests + DefaultNetworkPathMaxTTL = 30 ) // datadog is the global configuration object @@ -235,8 +242,23 @@ var serverlessConfigComponents = []func(pkgconfigmodel.Setup){ func init() { osinit() + // Configure Datadog global configuration - datadog = pkgconfigmodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + envvar, found := os.LookupEnv("DD_CONF_NODETREEMODEL") + // Possible values for DD_CONF_NODETREEMODEL: + // - "enable": Use the nodetreemodel for the config, instead of viper + // - "tee": Construct both viper and nodetreemodel. 
Write to both, only read from viper + // - other: Use viper for the config + if found && envvar == "enable" { + datadog = nodetreemodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + } else if found && envvar == "tee" { + var viperConfig = pkgconfigmodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + var nodetreeConfig = nodetreemodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + datadog = teeconfig.NewTeeConfig(viperConfig, nodetreeConfig) + } else { + datadog = pkgconfigmodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + } + systemProbe = pkgconfigmodel.NewConfig("system-probe", "DD", strings.NewReplacer(".", "_")) // Configuration defaults @@ -437,10 +459,10 @@ func InitConfig(config pkgconfigmodel.Config) { config.BindEnvAndSetDefault("network_path.connections_monitoring.enabled", false) config.BindEnvAndSetDefault("network_path.collector.workers", 4) config.BindEnvAndSetDefault("network_path.collector.timeout", DefaultNetworkPathTimeout) - config.BindEnvAndSetDefault("network_path.collector.max_ttl", 30) - config.BindEnvAndSetDefault("network_path.collector.input_chan_size", 1000) - config.BindEnvAndSetDefault("network_path.collector.processing_chan_size", 1000) - config.BindEnvAndSetDefault("network_path.collector.pathtest_contexts_limit", 10000) + config.BindEnvAndSetDefault("network_path.collector.max_ttl", DefaultNetworkPathMaxTTL) + config.BindEnvAndSetDefault("network_path.collector.input_chan_size", 100000) + config.BindEnvAndSetDefault("network_path.collector.processing_chan_size", 100000) + config.BindEnvAndSetDefault("network_path.collector.pathtest_contexts_limit", 100000) config.BindEnvAndSetDefault("network_path.collector.pathtest_ttl", "15m") config.BindEnvAndSetDefault("network_path.collector.pathtest_interval", "5m") config.BindEnvAndSetDefault("network_path.collector.flush_interval", "10s") @@ -716,6 +738,7 @@ func InitConfig(config pkgconfigmodel.Config) { config.BindEnvAndSetDefault("admission_controller.inject_config.socket_path", "/var/run/datadog") config.BindEnvAndSetDefault("admission_controller.inject_config.trace_agent_socket", "unix:///var/run/datadog/apm.socket") config.BindEnvAndSetDefault("admission_controller.inject_config.dogstatsd_socket", "unix:///var/run/datadog/dsd.socket") + config.BindEnvAndSetDefault("admission_controller.inject_config.type_socket_volumes", false) config.BindEnvAndSetDefault("admission_controller.inject_tags.enabled", true) config.BindEnvAndSetDefault("admission_controller.inject_tags.endpoint", "/injecttags") config.BindEnvAndSetDefault("admission_controller.inject_tags.pod_owners_cache_validity", 10) // in minutes @@ -969,6 +992,8 @@ func InitConfig(config pkgconfigmodel.Config) { config.BindEnvAndSetDefault("remote_policies", false) config.BindEnvAndSetDefault("installer.registry.url", "") config.BindEnvAndSetDefault("installer.registry.auth", "") + config.BindEnvAndSetDefault("installer.registry.username", "") + config.BindEnvAndSetDefault("installer.registry.password", "") config.BindEnv("fleet_policies_dir") config.SetDefault("fleet_layers", []string{}) @@ -1504,7 +1529,8 @@ func logsagent(config pkgconfigmodel.Setup) { config.BindEnvAndSetDefault("logs_config.auto_multi_line.tokenizer_max_input_bytes", 60) config.BindEnvAndSetDefault("logs_config.auto_multi_line.pattern_table_max_size", 20) config.BindEnvAndSetDefault("logs_config.auto_multi_line.pattern_table_match_threshold", 0.75) - config.BindEnvAndSetDefault("logs_config.tag_auto_multi_line_logs", false) + // Add a 
tag to logs that are multiline aggregated + config.BindEnvAndSetDefault("logs_config.tag_multi_line_logs", false) // Add a tag to logs that are truncated by the agent config.BindEnvAndSetDefault("logs_config.tag_truncated_logs", false) @@ -1541,12 +1567,16 @@ func logsagent(config pkgconfigmodel.Setup) { // more disk I/O at the wildcard log paths config.BindEnvAndSetDefault("logs_config.file_wildcard_selection_mode", "by_name") + // Max size in MB an integration logs file can use + config.BindEnvAndSetDefault("logs_config.integrations_logs_files_max_size", 10) + // Max disk usage in MB all integrations logs files are allowed to use in total + config.BindEnvAndSetDefault("logs_config.integrations_logs_total_usage", 100) + // Do not store logs on disk when the disk usage exceeds 80% of the disk capacity. + config.BindEnvAndSetDefault("logs_config.integrations_logs_disk_ratio", 0.80) + // SDS logs blocking mechanism config.BindEnvAndSetDefault("logs_config.sds.wait_for_configuration", "") config.BindEnvAndSetDefault("logs_config.sds.buffer_max_size", 0) - - // Max size in MB to allow for integrations logs files - config.BindEnvAndSetDefault("logs_config.integrations_logs_files_max_size", 100) } func vector(config pkgconfigmodel.Setup) { @@ -1848,6 +1878,8 @@ func findUnknownEnvVars(config pkgconfigmodel.Config, environ []string, addition "DD_TESTS_RUNTIME_COMPILED": {}, // this variable is used by the Kubernetes leader election mechanism "DD_POD_NAME": {}, + // this variable is used by tracers + "DD_INSTRUMENTATION_TELEMETRY_ENABLED": {}, } for _, key := range config.GetEnvVars() { knownVars[key] = struct{}{} diff --git a/pkg/config/config_change_checker.go b/pkg/config/setup/config_change_checker.go similarity index 99% rename from pkg/config/config_change_checker.go rename to pkg/config/setup/config_change_checker.go index 8ec0218cb70f9..674ad0d314eaf 100644 --- a/pkg/config/config_change_checker.go +++ b/pkg/config/setup/config_change_checker.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package config +package setup import ( "fmt" diff --git a/pkg/config/config_change_checker_test.go b/pkg/config/setup/config_change_checker_test.go similarity index 98% rename from pkg/config/config_change_checker_test.go rename to pkg/config/setup/config_change_checker_test.go index b36baca084e3a..8fd8f2cf70351 100644 --- a/pkg/config/config_change_checker_test.go +++ b/pkg/config/setup/config_change_checker_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package config +package setup import ( "testing" diff --git a/pkg/config/setup/config_test.go b/pkg/config/setup/config_test.go index d2b59e285c2a0..a52c698fb2e68 100644 --- a/pkg/config/setup/config_test.go +++ b/pkg/config/setup/config_test.go @@ -665,11 +665,11 @@ func TestNetworkPathDefaults(t *testing.T) { assert.Equal(t, false, config.GetBool("network_path.connections_monitoring.enabled")) assert.Equal(t, 4, config.GetInt("network_path.collector.workers")) - assert.Equal(t, 10000, config.GetInt("network_path.collector.timeout")) + assert.Equal(t, 1000, config.GetInt("network_path.collector.timeout")) assert.Equal(t, 30, config.GetInt("network_path.collector.max_ttl")) - assert.Equal(t, 1000, config.GetInt("network_path.collector.input_chan_size")) - assert.Equal(t, 1000, config.GetInt("network_path.collector.processing_chan_size")) - assert.Equal(t, 10000, config.GetInt("network_path.collector.pathtest_contexts_limit")) + assert.Equal(t, 100000, config.GetInt("network_path.collector.input_chan_size")) + assert.Equal(t, 100000, config.GetInt("network_path.collector.processing_chan_size")) + assert.Equal(t, 100000, config.GetInt("network_path.collector.pathtest_contexts_limit")) assert.Equal(t, 15*time.Minute, config.GetDuration("network_path.collector.pathtest_ttl")) assert.Equal(t, 5*time.Minute, config.GetDuration("network_path.collector.pathtest_interval")) assert.Equal(t, 10*time.Second, config.GetDuration("network_path.collector.flush_interval")) diff --git a/pkg/config/setup/constants/constants.go b/pkg/config/setup/constants/constants.go index d23620e464e22..7f68ba0973a77 100644 --- a/pkg/config/setup/constants/constants.go +++ b/pkg/config/setup/constants/constants.go @@ -9,4 +9,6 @@ package constants const ( // DefaultEBPFLessProbeAddr defines the default ebpfless probe address DefaultEBPFLessProbeAddr = "localhost:5678" + // ClusterIDCacheKey is the key name for the orchestrator cluster id in the agent in-mem cache + ClusterIDCacheKey = "orchestratorClusterID" ) diff --git a/pkg/config/setup/go.mod b/pkg/config/setup/go.mod index a4486128b46dd..0f7852291a486 100644 --- a/pkg/config/setup/go.mod +++ b/pkg/config/setup/go.mod @@ -12,6 +12,8 @@ replace ( github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../collector/check/defaults github.com/DataDog/datadog-agent/pkg/config/env => ../env github.com/DataDog/datadog-agent/pkg/config/model => ../model/ + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/telemetry => ../../telemetry github.com/DataDog/datadog-agent/pkg/util/executable => ../../util/executable github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../util/filesystem @@ -36,6 +38,8 @@ require ( github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 @@ -86,7 +90,7 @@ require ( github.com/prometheus/procfs v0.11.1 // indirect github.com/shirou/gopsutil/v3 
v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect @@ -103,12 +107,12 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/config/setup/go.sum b/pkg/config/setup/go.sum index a93fc7c02ab32..4d83c91b2f7dd 100644 --- a/pkg/config/setup/go.sum +++ b/pkg/config/setup/go.sum @@ -186,8 +186,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= @@ -258,15 +259,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 
h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -302,8 +303,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -319,8 +320,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/pkg/config/setup/otlp.go b/pkg/config/setup/otlp.go index 7ff9245d6196a..f6312342e319c 100644 --- a/pkg/config/setup/otlp.go +++ b/pkg/config/setup/otlp.go @@ -94,6 +94,5 @@ func setupOTLPEnvironmentVariables(config pkgconfigmodel.Setup) { config.BindEnv(OTLPSection + ".metrics.summaries.mode") // Debug settings - config.BindEnv(OTLPSection + ".debug.loglevel") // Deprecated config.BindEnv(OTLPSection + ".debug.verbosity") } diff --git a/pkg/config/setup/system_probe.go b/pkg/config/setup/system_probe.go index 64a4fc53e273e..a083d716d2a35 100644 --- a/pkg/config/setup/system_probe.go +++ b/pkg/config/setup/system_probe.go @@ -167,6 +167,10 @@ func InitSystemProbeConfig(cfg pkgconfigmodel.Config) { // User Tracer cfg.BindEnvAndSetDefault(join(diNS, "enabled"), false, "DD_DYNAMIC_INSTRUMENTATION_ENABLED") + cfg.BindEnvAndSetDefault(join(diNS, "offline_mode"), false, "DD_DYNAMIC_INSTRUMENTATION_OFFLINE_MODE") + cfg.BindEnvAndSetDefault(join(diNS, "probes_file_path"), false, 
"DD_DYNAMIC_INSTRUMENTATION_PROBES_FILE_PATH") + cfg.BindEnvAndSetDefault(join(diNS, "snapshot_output_file_path"), false, "DD_DYNAMIC_INSTRUMENTATION_SNAPSHOT_FILE_PATH") + cfg.BindEnvAndSetDefault(join(diNS, "diagnostics_output_file_path"), false, "DD_DYNAMIC_INSTRUMENTATION_DIAGNOSTICS_FILE_PATH") // network_tracer settings // we cannot use BindEnvAndSetDefault for network_config.enabled because we need to know if it was manually set. @@ -202,7 +206,7 @@ func InitSystemProbeConfig(cfg pkgconfigmodel.Config) { cfg.BindEnvAndSetDefault(join(spNS, "enable_conntrack_all_namespaces"), true, "DD_SYSTEM_PROBE_ENABLE_CONNTRACK_ALL_NAMESPACES") cfg.BindEnvAndSetDefault(join(netNS, "enable_protocol_classification"), true, "DD_ENABLE_PROTOCOL_CLASSIFICATION") cfg.BindEnvAndSetDefault(join(netNS, "enable_ringbuffers"), true, "DD_SYSTEM_PROBE_NETWORK_ENABLE_RINGBUFFERS") - cfg.BindEnvAndSetDefault(join(netNS, "enable_tcp_failed_connections"), false, "DD_SYSTEM_PROBE_NETWORK_ENABLE_FAILED_CONNS") + cfg.BindEnvAndSetDefault(join(netNS, "enable_tcp_failed_connections"), true, "DD_SYSTEM_PROBE_NETWORK_ENABLE_FAILED_CONNS") cfg.BindEnvAndSetDefault(join(netNS, "ignore_conntrack_init_failure"), false, "DD_SYSTEM_PROBE_NETWORK_IGNORE_CONNTRACK_INIT_FAILURE") cfg.BindEnvAndSetDefault(join(netNS, "conntrack_init_timeout"), 10*time.Second) cfg.BindEnvAndSetDefault(join(netNS, "allow_netlink_conntracker_fallback"), true) @@ -396,6 +400,7 @@ func InitSystemProbeConfig(cfg pkgconfigmodel.Config) { // Discovery config cfg.BindEnvAndSetDefault(join(discoveryNS, "enabled"), false) + cfg.BindEnvAndSetDefault(join(discoveryNS, "cpu_usage_update_delay"), "60s") // Fleet policies cfg.BindEnv("fleet_policies_dir") diff --git a/pkg/config/setup/system_probe_cws.go b/pkg/config/setup/system_probe_cws.go index 3497ffc143007..e904416d75d7e 100644 --- a/pkg/config/setup/system_probe_cws.go +++ b/pkg/config/setup/system_probe_cws.go @@ -86,7 +86,6 @@ func initCWSSystemProbeConfig(cfg pkgconfigmodel.Config) { cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.watch_dir", true) cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.cache_size", 10) cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.max_count", 400) - cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.remote_configuration.enabled", false) cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.dns_match_max_depth", 3) // CWS - Auto suppression diff --git a/pkg/config/structure/go.mod b/pkg/config/structure/go.mod new file mode 100644 index 0000000000000..131325228c46d --- /dev/null +++ b/pkg/config/structure/go.mod @@ -0,0 +1,90 @@ +module github.com/DataDog/datadog-agent/pkg/config/structure + +go 1.22.0 + +replace ( + github.com/DataDog/datadog-agent/comp/api/api/def => ../../../comp/api/api/def + github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../../comp/core/flare/builder + github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../comp/core/flare/types + github.com/DataDog/datadog-agent/comp/core/secrets => ../../../comp/core/secrets + github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../comp/core/telemetry + github.com/DataDog/datadog-agent/comp/def => ../../../comp/def + github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../pkg/collector/check/defaults + github.com/DataDog/datadog-agent/pkg/config/env => ../../../pkg/config/env + github.com/DataDog/datadog-agent/pkg/config/mock => ../../../pkg/config/mock + 
github.com/DataDog/datadog-agent/pkg/config/model => ../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel + github.com/DataDog/datadog-agent/pkg/config/setup => ../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig + github.com/DataDog/datadog-agent/pkg/util/executable => ../../../pkg/util/executable + github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../pkg/util/filesystem + github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../pkg/util/fxutil + github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../pkg/util/hostname/validate + github.com/DataDog/datadog-agent/pkg/util/log => ../../../pkg/util/log + github.com/DataDog/datadog-agent/pkg/util/optional => ../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../pkg/util/pointer + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../pkg/util/scrubber + github.com/DataDog/datadog-agent/pkg/util/system => ../../../pkg/util/system + github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../pkg/util/system/socket + github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../pkg/util/testutil + github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../pkg/util/winutil +) + +require ( + github.com/DataDog/datadog-agent/pkg/config/mock v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 + github.com/stretchr/testify v1.9.0 +) + +require ( + github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/viper v1.13.5 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.1 // 
indirect + github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/shirou/gopsutil/v3 v3.23.12 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + go.uber.org/atomic v1.11.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.17.0 // indirect + golang.org/x/tools v0.25.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/pkg/config/structure/go.sum b/pkg/config/structure/go.sum new file mode 100644 index 0000000000000..765bdc23a7bf4 --- /dev/null +++ b/pkg/config/structure/go.sum @@ -0,0 +1,353 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= +github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 
h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0 
h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= +github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= +go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= +go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap 
v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools 
v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/pkg/config/structure/unmarshal.go b/pkg/config/structure/unmarshal.go new file mode 100644 index 
0000000000000..c336a8bbc91d2 --- /dev/null +++ b/pkg/config/structure/unmarshal.go @@ -0,0 +1,557 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package structure defines a helper to retrieve structured data from the config +package structure + +import ( + "fmt" + "reflect" + "slices" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/DataDog/datadog-agent/pkg/config/model" +) + +// features allowed for handling edge-cases +type featureSet struct { + allowSquash bool + convertEmptyStrNil bool +} + +// UnmarshalKeyOption is an option that affects the enabled features in UnmarshalKey +type UnmarshalKeyOption func(*featureSet) + +// EnableSquash allows UnmarshalKey to take advantage of `mapstructure`s `squash` feature +// a squashed field hoists its fields up a level in the marshalled representation and directly embeds them +var EnableSquash UnmarshalKeyOption = func(fs *featureSet) { + fs.allowSquash = true +} + +// ConvertEmptyStringToNil allows UnmarshalKey to implicitly convert empty strings into nil slices +var ConvertEmptyStringToNil UnmarshalKeyOption = func(fs *featureSet) { + fs.convertEmptyStrNil = true +} + +// error for when a key is not found +var errNotFound = fmt.Errorf("not found") + +// UnmarshalKey retrieves data from the config at the given key and deserializes it +// to be stored on the target struct. It is implemented entirely using reflection, and +// does not depend upon details of the data model of the config. +// Target struct can use of struct tag of "yaml", "json", or "mapstructure" to rename fields +func UnmarshalKey(cfg model.Reader, key string, target interface{}, opts ...UnmarshalKeyOption) error { + fs := &featureSet{} + for _, o := range opts { + o(fs) + } + rawval := cfg.Get(key) + // Don't create a reflect.Value out of nil, just return immediately + if rawval == nil { + return nil + } + source, err := newNode(reflect.ValueOf(rawval)) + if err != nil { + return err + } + outValue := reflect.ValueOf(target) + if outValue.Kind() == reflect.Pointer { + outValue = reflect.Indirect(outValue) + } + switch outValue.Kind() { + case reflect.Map: + return copyMap(outValue, source, fs) + case reflect.Struct: + return copyStruct(outValue, source, fs) + case reflect.Slice: + if arr, ok := source.(arrayNode); ok { + return copyList(outValue, arr, fs) + } + if isEmptyString(source) { + if fs.convertEmptyStrNil { + return nil + } + return fmt.Errorf("treating empty string as a nil slice not allowed for UnmarshalKey without ConvertEmptyStrNil option") + } + return fmt.Errorf("can not UnmarshalKey to a slice from a non-list source: %T", source) + default: + return fmt.Errorf("can only UnmarshalKey to struct, map, or slice, got %v", outValue.Kind()) + } +} + +// leafNode represents a leaf with a scalar value + +type leafNode interface { + GetBool() (bool, error) + GetInt() (int, error) + GetFloat() (float64, error) + GetString() (string, error) +} + +type leafNodeImpl struct { + // val must be a scalar kind + val reflect.Value +} + +var _ leafNode = (*leafNodeImpl)(nil) +var _ node = (*leafNodeImpl)(nil) + +// arrayNode represents a node with an ordered array of children + +type arrayNode interface { + Size() int + Index(int) (node, error) +} + +type arrayNodeImpl struct { + // val must be a Slice with Len() and Index() + val reflect.Value +} + +var _ 
arrayNode = (*arrayNodeImpl)(nil) +var _ node = (*arrayNodeImpl)(nil) + +// node represents an arbitrary node of the tree + +type node interface { + GetChild(string) (node, error) + ChildrenKeys() ([]string, error) +} + +type innerNodeImpl struct { + // val must be a struct + val reflect.Value +} + +type innerMapNodeImpl struct { + // val must be a map[string]interface{} + val reflect.Value + // remapCase maps each lower-case key to the original case. This + // enables GetChild to retrieve values using case-insensitive keys + remapCase map[string]string +} + +var _ node = (*innerNodeImpl)(nil) +var _ node = (*innerMapNodeImpl)(nil) + +// all nodes, leaf, inner, and array nodes, each act as nodes +func newNode(v reflect.Value) (node, error) { + if v.Kind() == reflect.Struct { + return &innerNodeImpl{val: v}, nil + } else if v.Kind() == reflect.Map { + return &innerMapNodeImpl{val: v, remapCase: makeRemapCase(v)}, nil + } else if v.Kind() == reflect.Slice { + return &arrayNodeImpl{val: v}, nil + } else if isScalarKind(v) { + return &leafNodeImpl{val: v}, nil + } + return nil, fmt.Errorf("could not create node from: %v of type %T and kind %v", v, v, v.Kind()) +} + +// GetChild returns the child node at the given case-insensitive key, or an error if not found +func (n *innerNodeImpl) GetChild(key string) (node, error) { + findex := findFieldMatch(n.val, key) + if findex == -1 { + return nil, errNotFound + } + inner := n.val.Field(findex) + if inner.Kind() == reflect.Interface { + inner = inner.Elem() + } + return newNode(inner) +} + +// ChildrenKeys returns the list of keys of the children of the given node, if it is a map +func (n *innerNodeImpl) ChildrenKeys() ([]string, error) { + structType := n.val.Type() + keys := make([]string, 0, n.val.NumField()) + for i := 0; i < structType.NumField(); i++ { + f := structType.Field(i) + ch, _ := utf8.DecodeRuneInString(f.Name) + if unicode.IsLower(ch) { + continue + } + fieldKey, _ := fieldNameToKey(f) + keys = append(keys, fieldKey) + } + return keys, nil +} + +// GetChild returns the child node at the given case-insensitive key, or an error if not found +func (n *innerMapNodeImpl) GetChild(key string) (node, error) { + mkey := n.remapCase[strings.ToLower(key)] + inner := n.val.MapIndex(reflect.ValueOf(mkey)) + if !inner.IsValid() { + return nil, errNotFound + } + if inner.Kind() == reflect.Interface { + inner = inner.Elem() + } + return newNode(inner) +} + +// ChildrenKeys returns the list of keys of the children of the given node, if it is a map +func (n *innerMapNodeImpl) ChildrenKeys() ([]string, error) { + mapkeys := n.val.MapKeys() + keys := make([]string, 0, len(mapkeys)) + for _, kv := range mapkeys { + if kstr, ok := kv.Interface().(string); ok { + keys = append(keys, kstr) + } else { + return nil, fmt.Errorf("map node has invalid non-string key: %v", kv) + } + } + // map keys are iterated non-deterministically, sort them + slices.Sort(keys) + return keys, nil +} + +// GetChild returns an error because array node does not have children accessible by name +func (n *arrayNodeImpl) GetChild(string) (node, error) { + return nil, fmt.Errorf("arrayNodeImpl.GetChild not implemented") +} + +// ChildrenKeys returns an error because array node does not have children accessible by name +func (n *arrayNodeImpl) ChildrenKeys() ([]string, error) { + return nil, fmt.Errorf("arrayNodeImpl.ChildrenKeys not implemented") +} + +// Size returns number of children in the list +func (n *arrayNodeImpl) Size() int { + return n.val.Len() +} + +// Index returns the 
kth element of the list +func (n *arrayNodeImpl) Index(k int) (node, error) { + // arrayNodeImpl assumes val is an Array with Len() and Index() + elem := n.val.Index(k) + if elem.Kind() == reflect.Interface { + elem = elem.Elem() + } + return newNode(elem) +} + +// GetChild returns an error because a leaf has no children +func (n *leafNodeImpl) GetChild(key string) (node, error) { + return nil, fmt.Errorf("can't GetChild(%s) of a leaf node", key) +} + +// ChildrenKeys returns an error because a leaf has no children +func (n *leafNodeImpl) ChildrenKeys() ([]string, error) { + return nil, fmt.Errorf("can't get ChildrenKeys of a leaf node") +} + +// GetBool returns the scalar as a bool, or an error otherwise +func (n *leafNodeImpl) GetBool() (bool, error) { + if n.val.Kind() == reflect.Bool { + return n.val.Bool(), nil + } else if n.val.Kind() == reflect.Int { + return n.val.Int() != 0, nil + } else if n.val.Kind() == reflect.String { + return convertToBool(n.val.String()) + } + return false, newConversionError(n.val, "bool") +} + +// GetInt returns the scalar as a int, or an error otherwise +func (n *leafNodeImpl) GetInt() (int, error) { + switch n.val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int(n.val.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return int(n.val.Uint()), nil + case reflect.Float32, reflect.Float64: + return int(n.val.Float()), nil + } + return 0, newConversionError(n.val, "int") +} + +// GetFloat returns the scalar as a float64, or an error otherwise +func (n *leafNodeImpl) GetFloat() (float64, error) { + switch n.val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(n.val.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return float64(n.val.Uint()), nil + case reflect.Float32, reflect.Float64: + return float64(n.val.Float()), nil + } + return 0, newConversionError(n.val, "float") +} + +// GetString returns the scalar as a string, or an error otherwise +func (n *leafNodeImpl) GetString() (string, error) { + switch n.val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + stringVal := strconv.FormatInt(n.val.Int(), 10) + return stringVal, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + stringVal := strconv.FormatUint(n.val.Uint(), 10) + return stringVal, nil + case reflect.Float32: + stringVal := strconv.FormatFloat(n.val.Float(), 'f', -1, 32) + return stringVal, nil + case reflect.Float64: + stringVal := strconv.FormatFloat(n.val.Float(), 'f', -1, 64) + return stringVal, nil + case reflect.String: + return n.val.String(), nil + } + return "", newConversionError(n.val, "string") +} + +// convert a string to a bool using standard yaml constants +func convertToBool(text string) (bool, error) { + lower := strings.ToLower(text) + if lower == "y" || lower == "yes" || lower == "on" || lower == "true" || lower == "1" { + return true, nil + } else if lower == "n" || lower == "no" || lower == "off" || lower == "false" || lower == "0" { + return false, nil + } + return false, newConversionError(reflect.ValueOf(text), "bool") +} + +type specifierSet map[string]struct{} + +// fieldNameToKey returns the lower-cased field name, for case insensitive comparisons, +// with struct tag rename applied, as well as the set of specifiers from struct tags +// struct tags are handled in order of yaml, then json, 
then mapstructure +func fieldNameToKey(field reflect.StructField) (string, specifierSet) { + name := field.Name + + tagtext := "" + if val := field.Tag.Get("yaml"); val != "" { + tagtext = val + } else if val := field.Tag.Get("json"); val != "" { + tagtext = val + } else if val := field.Tag.Get("mapstructure"); val != "" { + tagtext = val + } + + // skip any additional specifiers such as ",omitempty" or ",squash" + // TODO: support multiple specifiers + var specifiers map[string]struct{} + if commaPos := strings.IndexRune(tagtext, ','); commaPos != -1 { + specifiers = make(map[string]struct{}) + val := tagtext[:commaPos] + specifiers[tagtext[commaPos+1:]] = struct{}{} + if val != "" { + name = val + } + } else if tagtext != "" { + name = tagtext + } + return strings.ToLower(name), specifiers +} + +func copyStruct(target reflect.Value, source node, fs *featureSet) error { + targetType := target.Type() + for i := 0; i < targetType.NumField(); i++ { + f := targetType.Field(i) + ch, _ := utf8.DecodeRuneInString(f.Name) + if unicode.IsLower(ch) { + continue + } + fieldKey, specifiers := fieldNameToKey(f) + if _, ok := specifiers["squash"]; ok { + if !fs.allowSquash { + return fmt.Errorf("feature 'squash' not allowed for UnmarshalKey without EnableSquash option") + } + err := copyAny(target.FieldByName(f.Name), source, fs) + if err != nil { + return err + } + continue + } + child, err := source.GetChild(fieldKey) + if err == errNotFound { + continue + } + if err != nil { + return err + } + err = copyAny(target.FieldByName(f.Name), child, fs) + if err != nil { + return err + } + } + return nil +} + +func copyMap(target reflect.Value, source node, _ *featureSet) error { + // TODO: Should handle maps with more complex types in a future PR + ktype := reflect.TypeOf("") + vtype := reflect.TypeOf("") + mtype := reflect.MapOf(ktype, vtype) + results := reflect.MakeMap(mtype) + + mapKeys, err := source.ChildrenKeys() + if err != nil { + return err + } + for _, mkey := range mapKeys { + child, err := source.GetChild(mkey) + if err != nil { + return err + } + if child == nil { + continue + } + if scalar, ok := child.(leafNode); ok { + if mval, err := scalar.GetString(); err == nil { + results.SetMapIndex(reflect.ValueOf(mkey), reflect.ValueOf(mval)) + } else { + return fmt.Errorf("TODO: only map[string]string supported currently") + } + } + } + target.Set(results) + return nil +} + +func copyLeaf(target reflect.Value, source leafNode, _ *featureSet) error { + if source == nil { + return fmt.Errorf("source value is not a scalar") + } + switch target.Kind() { + case reflect.Bool: + v, err := source.GetBool() + if err != nil { + return err + } + target.SetBool(v) + return nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v, err := source.GetInt() + if err != nil { + return err + } + target.SetInt(int64(v)) + return nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v, err := source.GetInt() + if err != nil { + return err + } + target.SetUint(uint64(v)) + return nil + case reflect.Float32, reflect.Float64: + v, err := source.GetFloat() + if err != nil { + return err + } + target.SetFloat(float64(v)) + return nil + case reflect.String: + v, err := source.GetString() + if err != nil { + return err + } + target.SetString(v) + return nil + } + return fmt.Errorf("unsupported scalar type %v", target.Kind()) +} + +func copyList(target reflect.Value, source arrayNode, fs *featureSet) error { + if source == nil { + return fmt.Errorf("source 
value is not a list") + } + elemType := target.Type() + elemType = elemType.Elem() + numElems := source.Size() + results := reflect.MakeSlice(reflect.SliceOf(elemType), numElems, numElems) + for k := 0; k < numElems; k++ { + elemSource, err := source.Index(k) + if err != nil { + return err + } + ptrOut := reflect.New(elemType) + outTarget := ptrOut.Elem() + err = copyAny(outTarget, elemSource, fs) + if err != nil { + return err + } + results.Index(k).Set(outTarget) + } + target.Set(results) + return nil +} + +func copyAny(target reflect.Value, source node, fs *featureSet) error { + if target.Kind() == reflect.Pointer { + allocPtr := reflect.New(target.Type().Elem()) + target.Set(allocPtr) + target = allocPtr.Elem() + } + if isScalarKind(target) { + if leaf, ok := source.(leafNode); ok { + return copyLeaf(target, leaf, fs) + } + return fmt.Errorf("can't copy into target: scalar required, but source is not a leaf") + } else if target.Kind() == reflect.Map { + return copyMap(target, source, fs) + } else if target.Kind() == reflect.Struct { + return copyStruct(target, source, fs) + } else if target.Kind() == reflect.Slice { + if arr, ok := source.(arrayNode); ok { + return copyList(target, arr, fs) + } + return fmt.Errorf("can't copy into target: []T required, but source is not an array") + } else if target.Kind() == reflect.Invalid { + return fmt.Errorf("can't copy invalid value %s : %v", target, target.Kind()) + } + return fmt.Errorf("unknown value to copy: %v", target.Type()) +} + +func isEmptyString(source node) bool { + if leaf, ok := source.(leafNode); ok { + if str, err := leaf.GetString(); err == nil { + return str == "" + } + } + return false +} + +func isScalarKind(v reflect.Value) bool { + k := v.Kind() + return (k >= reflect.Bool && k <= reflect.Float64) || k == reflect.String +} + +func makeRemapCase(v reflect.Value) map[string]string { + remap := make(map[string]string) + iter := v.MapRange() + for iter.Next() { + mkey := "" + switch k := iter.Key().Interface().(type) { + case string: + mkey = k + default: + mkey = fmt.Sprintf("%s", k) + } + remap[strings.ToLower(mkey)] = mkey + } + return remap +} + +func findFieldMatch(val reflect.Value, key string) int { + // case-insensitive match for struct names + key = strings.ToLower(key) + schema := val.Type() + for i := 0; i < schema.NumField(); i++ { + fieldKey, _ := fieldNameToKey(schema.Field(i)) + if key == fieldKey { + return i + } + } + return -1 +} + +func newConversionError(v reflect.Value, expectType string) error { + return fmt.Errorf("could not convert to %s: %v of type %T and Kind %v", expectType, v, v, v.Kind()) +} diff --git a/pkg/config/structure/unmarshal_test.go b/pkg/config/structure/unmarshal_test.go new file mode 100644 index 0000000000000..fffeb2bf43c17 --- /dev/null +++ b/pkg/config/structure/unmarshal_test.go @@ -0,0 +1,835 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package structure + +import ( + "math" + "reflect" + "strings" + "testing" + + "github.com/DataDog/datadog-agent/pkg/config/mock" + "github.com/stretchr/testify/assert" +) + +// Struct that is used within the config +type userV3 struct { + Username string `yaml:"user"` + UsernameLegacy string `yaml:"username"` + AuthKey string `yaml:"authKey"` + AuthProtocol string `yaml:"authProtocol"` + PrivKey string `yaml:"privKey"` + PrivProtocol string `yaml:"privProtocol"` +} + +// Type that gets parsed out of config +type trapsConfig struct { + Enabled bool `yaml:"enabled"` + Port uint16 `yaml:"port"` + Users []userV3 `yaml:"users"` + CommunityStrings []string `yaml:"community_strings"` + BindHost string `yaml:"bind_host"` + StopTimeout int `yaml:"stop_timeout"` + Namespace string `yaml:"namespace"` +} + +func TestUnmarshalKeyTrapsConfig(t *testing.T) { + confYaml := ` +network_devices: + snmp_traps: + enabled: true + port: 1234 + community_strings: ["a","b","c"] + users: + - user: alice + authKey: hunter2 + authProtocol: MD5 + privKey: pswd + privProtocol: AE5 + - user: bob + authKey: "123456" + authProtocol: MD5 + privKey: secret + privProtocol: AE5 + bind_host: ok + stop_timeout: 4 + namespace: abc +` + mockConfig := mock.NewFromYAML(t, confYaml) + + var trapsCfg = trapsConfig{} + err := UnmarshalKey(mockConfig, "network_devices.snmp_traps", &trapsCfg) + assert.NoError(t, err) + + assert.Equal(t, trapsCfg.Enabled, true) + assert.Equal(t, trapsCfg.Port, uint16(1234)) + assert.Equal(t, trapsCfg.CommunityStrings, []string{"a", "b", "c"}) + + assert.Equal(t, len(trapsCfg.Users), 2) + assert.Equal(t, trapsCfg.Users[0].Username, "alice") + assert.Equal(t, trapsCfg.Users[0].AuthKey, "hunter2") + assert.Equal(t, trapsCfg.Users[0].AuthProtocol, "MD5") + assert.Equal(t, trapsCfg.Users[0].PrivKey, "pswd") + assert.Equal(t, trapsCfg.Users[0].PrivProtocol, "AE5") + assert.Equal(t, trapsCfg.Users[1].Username, "bob") + assert.Equal(t, trapsCfg.Users[1].AuthKey, "123456") + assert.Equal(t, trapsCfg.Users[1].AuthProtocol, "MD5") + assert.Equal(t, trapsCfg.Users[1].PrivKey, "secret") + assert.Equal(t, trapsCfg.Users[1].PrivProtocol, "AE5") + + assert.Equal(t, trapsCfg.BindHost, "ok") + assert.Equal(t, trapsCfg.StopTimeout, 4) + assert.Equal(t, trapsCfg.Namespace, "abc") +} + +type serviceDescription struct { + Host string + Endpoint endpoint `mapstructure:",squash"` +} + +type endpoint struct { + Name string `yaml:"name"` + APIKey string `yaml:"apikey"` +} + +func TestUnmarshalKeySliceOfStructures(t *testing.T) { + testcases := []struct { + name string + conf string + want []endpoint + }{ + { + name: "simple wellformed", + conf: ` +endpoints: +- name: intake + apikey: abc1 +- name: config + apikey: abc2 +- name: health + apikey: abc3 +`, + want: []endpoint{ + {Name: "intake", APIKey: "abc1"}, + {Name: "config", APIKey: "abc2"}, + {Name: "health", APIKey: "abc3"}, + }, + }, + { + name: "missing a field is zero value", + conf: ` +endpoints: +- name: intake +- name: config + apikey: abc2 +`, + want: []endpoint{ + {Name: "intake", APIKey: ""}, + {Name: "config", APIKey: "abc2"}, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + mockConfig := mock.NewFromYAML(t, tc.conf) + mockConfig.SetKnown("endpoints") + + var endpoints = []endpoint{} + err := UnmarshalKey(mockConfig, "endpoints", &endpoints) + assert.NoError(t, err, "%s failed to marshal: %s", tc.name, err) + + assert.Equal(t, len(endpoints), len(tc.want), "%s marshalled unexepected length of slices, wanted: %s got: %s", 
tc.name, len(tc.want), len(endpoints)) + for i := range endpoints { + assert.Equal(t, endpoints[i].Name, tc.want[i].Name) + assert.Equal(t, endpoints[i].APIKey, tc.want[i].APIKey) + } + }) + } +} + +func TestUnmarshalKeyWithSquash(t *testing.T) { + confYaml := ` +service: + host: datad0g.com + name: intake + apikey: abc1 +` + mockConfig := mock.NewFromYAML(t, confYaml) + mockConfig.SetKnown("service") + + var svc = serviceDescription{} + // fails without EnableSquash being given + err := UnmarshalKey(mockConfig, "service", &svc) + assert.Error(t, err) + assert.Contains(t, err.Error(), "EnableSquash") + + // succeeds + err = UnmarshalKey(mockConfig, "service", &svc, EnableSquash) + assert.NoError(t, err) + + assert.Equal(t, svc.Host, "datad0g.com") + assert.Equal(t, svc.Endpoint.Name, "intake") + assert.Equal(t, svc.Endpoint.APIKey, "abc1") +} + +type featureConfig struct { + Enabled bool `yaml:"enabled"` +} + +func TestUnmarshalKeyAsBool(t *testing.T) { + testcases := []struct { + name string + conf string + want bool + skip bool + }{ + { + name: "string value to true", + conf: ` +feature: + enabled: "true" +`, + want: true, + skip: false, + }, + { + name: "yaml boolean value true", + conf: ` +feature: + enabled: true +`, + want: true, + skip: false, + }, + { + name: "string value to false", + conf: ` +feature: + enabled: "false" +`, + want: false, + skip: false, + }, + { + name: "yaml boolean value false", + conf: ` +feature: + enabled: false +`, + want: false, + skip: false, + }, + { + name: "missing value is false", + conf: ` +feature: + not_enabled: "the missing key should be false" +`, + want: false, + skip: false, + }, + { + name: "string 'y' value is true", + conf: ` +feature: + enabled: y +`, + want: true, + skip: false, + }, + { + name: "string 'yes' value is true", + conf: ` +feature: + enabled: yes +`, + want: true, + skip: false, + }, + { + name: "string 'on' value is true", + conf: ` +feature: + enabled: on +`, + want: true, + skip: false, + }, + { + name: "string '1' value is true", + conf: ` +feature: + enabled: "1" +`, + want: true, + skip: false, + }, + { + name: "int 1 value is true", + conf: ` +feature: + enabled: 1 +`, + want: true, + skip: false, + }, + { + name: "string 'n' value is false", + conf: ` +feature: + enabled: n +`, + want: false, + skip: false, + }, + { + name: "string 'no' value is false", + conf: ` +feature: + enabled: no +`, + want: false, + skip: false, + }, + { + name: "string 'off' value is false", + conf: ` +feature: + enabled: off +`, + want: false, + skip: false, + }, + { + name: "string '0' value is false", + conf: ` +feature: + enabled: "0" +`, + want: false, + skip: false, + }, + { + name: "int 0 value is false", + conf: ` +feature: + enabled: 0 +`, + want: false, + skip: false, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + if tc.skip { + t.Skip("Skipping test case") + } + + mockConfig := mock.NewFromYAML(t, tc.conf) + mockConfig.SetKnown("feature") + + var feature = featureConfig{} + err := UnmarshalKey(mockConfig, "feature", &feature) + assert.NoError(t, err, "%s failed to marshal: %s", tc.name, err) + + assert.Equal(t, feature.Enabled, tc.want, "%s unexpected marshal value, want: %s got: %s", tc.name, tc.want, feature.Enabled) + }) + } +} + +type uintConfig struct { + Fielduint8 uint8 `yaml:"uint8"` + Fielduint16 uint16 `yaml:"uint16"` + Fielduint32 uint32 `yaml:"uint32"` + Fielduint64 uint64 `yaml:"uint64"` + Fieldint8 int8 `yaml:"int8"` + Fieldint16 int16 `yaml:"int16"` + Fieldint32 int32 
`yaml:"int32"` + Fieldint64 int64 `yaml:"int64"` +} + +func TestUnmarshalKeyAsInt(t *testing.T) { + testcases := []struct { + name string + conf string + want uintConfig + skip bool + }{ + { + name: "value int config map", + conf: ` +feature: + uint8: 123 + uint16: 1234 + uint32: 1234 + uint64: 1234 + int8: 123 + int16: 1234 + int32: 1234 + int64: 1234 +`, + want: uintConfig{ + Fielduint8: 123, + Fielduint16: 1234, + Fielduint32: 1234, + Fielduint64: 1234, + Fieldint8: 123, + Fieldint16: 1234, + Fieldint32: 1234, + Fieldint64: 1234, + }, + skip: false, + }, + { + name: "float convert to int config map", + conf: ` +feature: + uint8: 12.0 + uint16: 1234.0 + uint32: 1234 + uint64: 1234 + int8: 12.3 + int16: 12.9 + int32: 12.34 + int64: -12.34 +`, + want: uintConfig{ + Fielduint8: 12, + Fielduint16: 1234, + Fielduint32: 1234, + Fielduint64: 1234, + Fieldint8: 12, + Fieldint16: 12, // expected truncation of the decimal, no rounding + Fieldint32: 12, + Fieldint64: -12, + }, + skip: false, + }, + { + name: "missing field is zero value config map", + conf: ` +feature: + uint16: 1234 + uint32: 1234 + uint64: 1234 + int8: 123 + int16: 1234 + int32: 1234 + int64: 1234 +`, + want: uintConfig{ + Fielduint8: 0, + Fielduint16: 1234, + Fielduint32: 1234, + Fielduint64: 1234, + Fieldint8: 123, + Fieldint16: 1234, + Fieldint32: 1234, + Fieldint64: 1234, + }, + skip: false, + }, + { + name: "overflow int config map", + conf: ` +feature: + uint8: 1234 + uint16: 1234 + uint32: 1234 + uint64: 1234 + int8: 123 + int16: 1234 + int32: 1234 + int64: 1234 +`, + want: uintConfig{ + Fielduint8: math.MaxUint8, // actual 230 - unclear what this behavior should be + Fielduint16: 1234, + Fielduint32: 1234, + Fielduint64: 1234, + Fieldint8: 123, + Fieldint16: 1234, + Fieldint32: 1234, + Fieldint64: 1234, + }, + skip: true, + }, + { + name: "underflow int config map", + conf: ` +feature: + uint8: -123 + uint16: 1234 + uint32: 1234 + uint64: 1234 + int8: 123 + int16: 1234 + int32: 1234 + int64: 1234 +`, + want: uintConfig{ + Fielduint8: 0, // actual 133 - unclear what this behavior should be + Fielduint16: 1234, + Fielduint32: 1234, + Fielduint64: 1234, + Fieldint8: 123, + Fieldint16: 1234, + Fieldint32: 1234, + Fieldint64: 1234, + }, + skip: true, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + if tc.skip { + t.Skip("Skipping test case") + } + + mockConfig := mock.NewFromYAML(t, tc.conf) + mockConfig.SetKnown("feature") + + var feature = uintConfig{} + err := UnmarshalKey(mockConfig, "feature", &feature) + assert.NoError(t, err, "%s failed to marshal: %s", tc.name, err) + if err != nil { + t.FailNow() + } + + confvalues := reflect.ValueOf(feature) + wantvalues := reflect.ValueOf(tc.want) + + for i := 0; i < confvalues.NumField(); i++ { + wantType := strings.ReplaceAll(confvalues.Type().Field(i).Name, "Field", "") + actual := confvalues.Field(i).Type().Name() + assert.Equal(t, wantType, actual, "%s unexpected marshal type, want: %s got: %s", tc.name, wantType, actual) + assert.True(t, reflect.DeepEqual(wantvalues.Field(i).Interface(), confvalues.Field(i).Interface()), "%s marshalled values not equal, want: %s, got: %s", tc.name, wantvalues.Field(i), confvalues.Field(i)) + } + }) + } +} + +type floatConfig struct { + Fieldfloat32 float32 `yaml:"float32"` + Fieldfloat64 float64 `yaml:"float64"` +} + +func TestUnmarshalKeyAsFloat(t *testing.T) { + testcases := []struct { + name string + conf string + want floatConfig + skip bool + }{ + { + name: "value float config map", + conf: ` +feature: + 
float32: 12.34 + float64: 12.34 +`, + want: floatConfig{ + Fieldfloat32: 12.34, + Fieldfloat64: 12.34, + }, + skip: false, + }, + { + name: "missing field zero value float config map", + conf: ` +feature: + float64: 12.34 +`, + want: floatConfig{ + Fieldfloat32: 0.0, + Fieldfloat64: 12.34, + }, + skip: false, + }, + { + name: "converts ints to float config map", + conf: ` +feature: + float32: 12 + float64: 12 +`, + want: floatConfig{ + Fieldfloat32: 12.0, + Fieldfloat64: 12.0, + }, + skip: false, + }, + { + name: "converts negatives to float config map", + conf: ` +feature: + float32: -12 + float64: -12.34 +`, + want: floatConfig{ + Fieldfloat32: -12.0, + Fieldfloat64: -12.34, + }, + skip: false, + }, + { + name: "starting decimal to float config map", + conf: ` +feature: + float32: .34 + float64: -.34 +`, + want: floatConfig{ + Fieldfloat32: 0.34, + Fieldfloat64: -0.34, + }, + skip: false, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + if tc.skip { + t.Skip("Skipping test case") + } + + mockConfig := mock.NewFromYAML(t, tc.conf) + mockConfig.SetKnown("feature") + + var feature = floatConfig{} + err := UnmarshalKey(mockConfig, "feature", &feature) + assert.NoError(t, err, "%s failed to marshal: %s", tc.name, err) + if err != nil { + t.FailNow() + } + + confvalues := reflect.ValueOf(feature) + wantvalues := reflect.ValueOf(tc.want) + + for i := 0; i < confvalues.NumField(); i++ { + wantType := strings.ReplaceAll(confvalues.Type().Field(i).Name, "Field", "") + actual := confvalues.Field(i).Type().Name() + assert.Equal(t, wantType, actual, "%s unexpected marshal type, want: %s got: %s", tc.name, wantType, actual) + assert.True(t, reflect.DeepEqual(wantvalues.Field(i).Interface(), confvalues.Field(i).Interface()), "%s marshalled values not equal, want: %s, got: %s", tc.name, wantvalues.Field(i), confvalues.Field(i)) + } + }) + } +} + +type stringConfig struct { + Field string `yaml:"value"` +} + +func TestUnmarshalKeyAsString(t *testing.T) { + testcases := []struct { + name string + conf string + want stringConfig + skip bool + }{ + { + name: "string value config map", + conf: ` +feature: + value: a string +`, + want: stringConfig{ + Field: "a string", + }, + skip: false, + }, + { + name: "quoted string config map", + conf: ` +feature: + value: "12.34" +`, + want: stringConfig{ + Field: "12.34", + }, + skip: false, + }, + { + name: "missing field is a empty string", + conf: ` +feature: + float64: 12.34 +`, + want: stringConfig{ + Field: string(""), + }, + skip: false, + }, + { + name: "converts yaml parsed int to match struct", + conf: ` +feature: + value: 42 +`, + want: stringConfig{ + Field: "42", + }, + skip: false, + }, + { + name: "truncates large yaml floats instead of using exponents", + conf: ` +feature: + value: 4.2222222222222222222222 +`, + want: stringConfig{ + Field: "4.222222222222222", + }, + skip: false, + }, + { + name: "converts yaml parsed float to match struct", + conf: ` +feature: + value: 4.2 +`, + want: stringConfig{ + Field: "4.2", + }, + skip: false, + }, + { + name: "commas are part of the string and not a list", + conf: ` +feature: + value: not, a, list +`, + want: stringConfig{ + Field: "not, a, list", + }, + skip: false, + }, + { + name: "parses special characters", + conf: ` +feature: + value: ☺☻☹ +`, + want: stringConfig{ + Field: "☺☻☹", + }, + skip: false, + }, + { + name: "does not parse invalid ascii to byte sequences", + conf: ` +feature: + value: \xff-\xff +`, + want: stringConfig{ + Field: `\xff-\xff`, + }, + skip: 
false, + }, + { + name: "retains string utf-8", + conf: ` +feature: + value: 日本語 +`, + want: stringConfig{ + Field: "日本語", + }, + skip: false, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + if tc.skip { + t.Skip("Skipping test case") + } + + mockConfig := mock.NewFromYAML(t, tc.conf) + mockConfig.SetKnown("feature") + + var feature = stringConfig{} + err := UnmarshalKey(mockConfig, "feature", &feature) + assert.NoError(t, err, "%s failed to marshal: %s", tc.name, err) + if err != nil { + t.FailNow() + } + + confvalues := reflect.ValueOf(feature) + wantvalues := reflect.ValueOf(tc.want) + + for i := 0; i < confvalues.NumField(); i++ { + wantType := "string" + actual := confvalues.Field(i).Type().Name() + assert.Equal(t, wantType, actual, "%s unexpected marshal type, want: %s got: %s", tc.name, wantType, actual) + assert.True(t, reflect.DeepEqual(wantvalues.Field(i).Interface(), confvalues.Field(i).Interface()), "%s marshalled values not equal, want: %s, got: %s", tc.name, wantvalues.Field(i), confvalues.Field(i)) + } + }) + } +} + +type featureConfigDiffCase struct { + ENaBLEd bool +} + +func TestUnmarshalKeyCaseInsensitive(t *testing.T) { + confYaml := ` +feature: + EnABLeD: "true" +` + mockConfig := mock.NewFromYAML(t, confYaml) + mockConfig.SetKnown("feature") + + var feature = featureConfig{} + err := UnmarshalKey(mockConfig, "feature", &feature) + assert.NoError(t, err) + + assert.Equal(t, feature.Enabled, true) + + var diffcase = featureConfigDiffCase{} + err = UnmarshalKey(mockConfig, "feature", &diffcase) + assert.NoError(t, err) + + assert.Equal(t, diffcase.ENaBLEd, true) +} + +func TestUnmarshalKeyMissing(t *testing.T) { + confYaml := ` +feature: + enabled: "true" +` + mockConfig := mock.NewFromYAML(t, confYaml) + mockConfig.SetKnown("feature") + + // If the data from the config is missing, UnmarshalKey is a no-op, does + // nothing, and returns no error + var endpoints = []endpoint{} + err := UnmarshalKey(mockConfig, "config_providers", &endpoints) + assert.NoError(t, err) +} + +func TestMapGetChildNotFound(t *testing.T) { + m := map[string]string{"a": "apple", "b": "banana"} + n, err := newNode(reflect.ValueOf(m)) + assert.NoError(t, err) + + val, err := n.GetChild("a") + assert.NoError(t, err) + str, err := val.(leafNode).GetString() + assert.NoError(t, err) + assert.Equal(t, str, "apple") + + _, err = n.GetChild("c") + assert.Error(t, err) + assert.Equal(t, err.Error(), "not found") + + keys, err := n.ChildrenKeys() + assert.NoError(t, err) + assert.Equal(t, keys, []string{"a", "b"}) +} diff --git a/pkg/config/teeconfig/go.mod b/pkg/config/teeconfig/go.mod new file mode 100644 index 0000000000000..6756cc2c5cd58 --- /dev/null +++ b/pkg/config/teeconfig/go.mod @@ -0,0 +1,35 @@ +module github.com/DataDog/datadog-agent/pkg/config/teeconfig + +go 1.22.0 + +replace ( + github.com/DataDog/datadog-agent/pkg/config/model => ../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/util/log => ../../../pkg/util/log + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../pkg/util/scrubber +) + +require ( + github.com/DataDog/datadog-agent/pkg/config/model v0.0.0-00010101000000-000000000000 + github.com/DataDog/viper v1.13.5 + github.com/spf13/afero v1.11.0 +) + +require ( + github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/fsnotify/fsnotify v1.4.7 // 
indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/magiconair/properties v1.8.1 // indirect + github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect + github.com/spf13/cast v1.3.0 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/spf13/pflag v1.0.3 // indirect + go.uber.org/atomic v1.11.0 // indirect + golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/text v0.17.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) diff --git a/pkg/config/teeconfig/go.sum b/pkg/config/teeconfig/go.sum new file mode 100644 index 0000000000000..71e16088460f3 --- /dev/null +++ b/pkg/config/teeconfig/go.sum @@ -0,0 +1,254 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= +github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= 
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod 
h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0 
h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast 
v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod 
h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/pkg/config/teeconfig/teeconfig.go b/pkg/config/teeconfig/teeconfig.go new file mode 100644 index 0000000000000..93f7f08511a14 --- /dev/null +++ b/pkg/config/teeconfig/teeconfig.go @@ -0,0 +1,410 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package teeconfig is a tee of two configs that writes to both but reads from only one +package teeconfig + +import ( + "io" + "strings" + "time" + + "github.com/DataDog/viper" + "github.com/spf13/afero" + + "github.com/DataDog/datadog-agent/pkg/config/model" +) + +// teeConfig is a combination of two configs, both get written to but only baseline is read +type teeConfig struct { + baseline model.Config + compare model.Config +} + +// NewTeeConfig constructs a new teeConfig +func NewTeeConfig(baseline, compare model.Config) model.Config { + return &teeConfig{baseline: baseline, compare: compare} +} + +// OnUpdate adds a callback to the list receivers to be called each time a value is changed in the configuration +// by a call to the 'Set' method. +// Callbacks are only called if the value is effectively changed. 
+func (t *teeConfig) OnUpdate(callback model.NotificationReceiver) { + t.baseline.OnUpdate(callback) + t.compare.OnUpdate(callback) +} + +// Set wraps Viper for concurrent access +func (t *teeConfig) Set(key string, newValue interface{}, source model.Source) { + t.baseline.Set(key, newValue, source) + t.compare.Set(key, newValue, source) +} + +// SetWithoutSource sets the given value using source Unknown +func (t *teeConfig) SetWithoutSource(key string, value interface{}) { + t.baseline.SetWithoutSource(key, value) + t.compare.SetWithoutSource(key, value) +} + +// SetDefault wraps Viper for concurrent access +func (t *teeConfig) SetDefault(key string, value interface{}) { + t.baseline.SetDefault(key, value) + t.compare.SetDefault(key, value) +} + +// UnsetForSource unsets a config entry for a given source +func (t *teeConfig) UnsetForSource(key string, source model.Source) { + t.baseline.UnsetForSource(key, source) + t.compare.UnsetForSource(key, source) +} + +// SetKnown adds a key to the set of known valid config keys +func (t *teeConfig) SetKnown(key string) { + t.baseline.SetKnown(key) + t.compare.SetKnown(key) +} + +// IsKnown returns whether a key is known +func (t *teeConfig) IsKnown(key string) bool { + return t.baseline.IsKnown(key) +} + +// GetKnownKeysLowercased returns all the keys that meet at least one of these criteria: +// 1) have a default, 2) have an environment variable bound or 3) have been SetKnown() +// Note that it returns the keys lowercased. +func (t *teeConfig) GetKnownKeysLowercased() map[string]interface{} { + return t.baseline.GetKnownKeysLowercased() +} + +// ParseEnvAsStringSlice registers a transformer function to parse an environment variable as a []string. +func (t *teeConfig) ParseEnvAsStringSlice(key string, fn func(string) []string) { + t.baseline.ParseEnvAsStringSlice(key, fn) + t.compare.ParseEnvAsStringSlice(key, fn) +} + +// ParseEnvAsMapStringInterface registers a transformer function to parse an environment variable as a +// map[string]interface{}. +func (t *teeConfig) ParseEnvAsMapStringInterface(key string, fn func(string) map[string]interface{}) { + t.baseline.ParseEnvAsMapStringInterface(key, fn) + t.compare.ParseEnvAsMapStringInterface(key, fn) +} + +// ParseEnvAsSliceMapString registers a transformer function to parse an environment variable as a []map[string]string. +func (t *teeConfig) ParseEnvAsSliceMapString(key string, fn func(string) []map[string]string) { + t.baseline.ParseEnvAsSliceMapString(key, fn) + t.compare.ParseEnvAsSliceMapString(key, fn) +} + +// ParseEnvAsSlice registers a transformer function to parse an environment variable as a +// []interface{}.
+func (t *teeConfig) ParseEnvAsSlice(key string, fn func(string) []interface{}) { + t.baseline.ParseEnvAsSlice(key, fn) + t.compare.ParseEnvAsSlice(key, fn) +} + +// SetFs wraps Viper for concurrent access +func (t *teeConfig) SetFs(fs afero.Fs) { + t.baseline.SetFs(fs) + t.compare.SetFs(fs) +} + +// IsSet wraps Viper for concurrent access +func (t *teeConfig) IsSet(key string) bool { + return t.baseline.IsSet(key) +} + +func (t *teeConfig) AllKeysLowercased() []string { + return t.baseline.AllKeysLowercased() +} + +// Get wraps Viper for concurrent access +func (t *teeConfig) Get(key string) interface{} { + return t.baseline.Get(key) +} + +// GetAllSources returns the value of a key for each source +func (t *teeConfig) GetAllSources(key string) []model.ValueWithSource { + return t.baseline.GetAllSources(key) +} + +// GetString wraps Viper for concurrent access +func (t *teeConfig) GetString(key string) string { + return t.baseline.GetString(key) +} + +// GetBool wraps Viper for concurrent access +func (t *teeConfig) GetBool(key string) bool { + return t.baseline.GetBool(key) +} + +// GetInt wraps Viper for concurrent access +func (t *teeConfig) GetInt(key string) int { + return t.baseline.GetInt(key) +} + +// GetInt32 wraps Viper for concurrent access +func (t *teeConfig) GetInt32(key string) int32 { + return t.baseline.GetInt32(key) +} + +// GetInt64 wraps Viper for concurrent access +func (t *teeConfig) GetInt64(key string) int64 { + return t.baseline.GetInt64(key) +} + +// GetFloat64 wraps Viper for concurrent access +func (t *teeConfig) GetFloat64(key string) float64 { + return t.baseline.GetFloat64(key) +} + +// GetTime wraps Viper for concurrent access +func (t *teeConfig) GetTime(key string) time.Time { + return t.baseline.GetTime(key) +} + +// GetDuration wraps Viper for concurrent access +func (t *teeConfig) GetDuration(key string) time.Duration { + return t.baseline.GetDuration(key) +} + +// GetStringSlice wraps Viper for concurrent access +func (t *teeConfig) GetStringSlice(key string) []string { + return t.baseline.GetStringSlice(key) +} + +// GetFloat64SliceE loads a key as a []float64 +func (t *teeConfig) GetFloat64SliceE(key string) ([]float64, error) { + return t.baseline.GetFloat64SliceE(key) +} + +// GetStringMap wraps Viper for concurrent access +func (t *teeConfig) GetStringMap(key string) map[string]interface{} { + return t.baseline.GetStringMap(key) +} + +// GetStringMapString wraps Viper for concurrent access +func (t *teeConfig) GetStringMapString(key string) map[string]string { + return t.baseline.GetStringMapString(key) +} + +// GetStringMapStringSlice wraps Viper for concurrent access +func (t *teeConfig) GetStringMapStringSlice(key string) map[string][]string { + return t.baseline.GetStringMapStringSlice(key) +} + +// GetSizeInBytes wraps Viper for concurrent access +func (t *teeConfig) GetSizeInBytes(key string) uint { + return t.baseline.GetSizeInBytes(key) +} + +// GetSource wraps Viper for concurrent access +func (t *teeConfig) GetSource(key string) model.Source { + return t.baseline.GetSource(key) +} + +// SetEnvPrefix wraps Viper for concurrent access, and keeps the envPrefix for +// future reference +func (t *teeConfig) SetEnvPrefix(in string) { + t.baseline.SetEnvPrefix(in) + t.compare.SetEnvPrefix(in) +} + +// BindEnv wraps Viper for concurrent access, and adds tracking of the configurable env vars +func (t *teeConfig) BindEnv(input ...string) { + t.baseline.BindEnv(input...) + t.compare.BindEnv(input...) 
+} + +// SetEnvKeyReplacer wraps Viper for concurrent access +func (t *teeConfig) SetEnvKeyReplacer(r *strings.Replacer) { + t.baseline.SetEnvKeyReplacer(r) + t.compare.SetEnvKeyReplacer(r) +} + +// UnmarshalKey wraps Viper for concurrent access +func (t *teeConfig) UnmarshalKey(key string, rawVal interface{}, opts ...viper.DecoderConfigOption) error { + return t.baseline.UnmarshalKey(key, rawVal, opts...) +} + +// Unmarshal wraps Viper for concurrent access +func (t *teeConfig) Unmarshal(rawVal interface{}) error { + return t.baseline.Unmarshal(rawVal) +} + +// UnmarshalExact wraps Viper for concurrent access +func (t *teeConfig) UnmarshalExact(rawVal interface{}) error { + return t.baseline.UnmarshalExact(rawVal) +} + +// ReadInConfig wraps Viper for concurrent access +func (t *teeConfig) ReadInConfig() error { + err1 := t.baseline.ReadInConfig() + err2 := t.compare.ReadInConfig() + if err1 != nil { + return err1 + } + if err2 != nil { + return err2 + } + return nil +} + +// ReadConfig wraps Viper for concurrent access +func (t *teeConfig) ReadConfig(in io.Reader) error { + err1 := t.baseline.ReadConfig(in) + err2 := t.compare.ReadConfig(in) + if err1 != nil { + return err1 + } + if err2 != nil { + return err2 + } + return nil + +} + +// MergeConfig wraps Viper for concurrent access +func (t *teeConfig) MergeConfig(in io.Reader) error { + err1 := t.baseline.MergeConfig(in) + err2 := t.compare.MergeConfig(in) + if err1 != nil { + return err1 + } + if err2 != nil { + return err2 + } + return nil + +} + +// MergeFleetPolicy merges the configuration from the reader given with an existing config +// it overrides the existing values with the new ones in the FleetPolicies source, and updates the main config +// according to sources priority order. +// +// Note: this should only be called at startup, as notifiers won't receive a notification when this loads +func (t *teeConfig) MergeFleetPolicy(configPath string) error { + err1 := t.baseline.MergeFleetPolicy(configPath) + err2 := t.compare.MergeFleetPolicy(configPath) + if err1 != nil { + return err1 + } + if err2 != nil { + return err2 + } + return nil +} + +// MergeConfigMap merges the configuration from the map given with an existing config. +// Note that the map given may be modified. +func (t *teeConfig) MergeConfigMap(cfg map[string]any) error { + err1 := t.baseline.MergeConfigMap(cfg) + err2 := t.compare.MergeConfigMap(cfg) + if err1 != nil { + return err1 + } + if err2 != nil { + return err2 + } + return nil +} + +// AllSettings wraps Viper for concurrent access +func (t *teeConfig) AllSettings() map[string]interface{} { + return t.baseline.AllSettings() +} + +// AllSettingsWithoutDefault returns a copy of the all the settings in the configuration without defaults +func (t *teeConfig) AllSettingsWithoutDefault() map[string]interface{} { + return t.baseline.AllSettingsWithoutDefault() +} + +// AllSettingsBySource returns the settings from each source (file, env vars, ...) +func (t *teeConfig) AllSettingsBySource() map[model.Source]interface{} { + return t.baseline.AllSettingsBySource() +} + +// AddConfigPath wraps Viper for concurrent access +func (t *teeConfig) AddConfigPath(in string) { + t.baseline.AddConfigPath(in) + t.compare.AddConfigPath(in) +} + +// AddExtraConfigPaths allows adding additional configuration files +// which will be merged into the main configuration during the ReadInConfig call. +// Configuration files are merged sequentially. 
If a key already exists and the foreign value type matches the existing one, the foreign value overrides it. +// If both the existing value and the new value are nested configurations, they are merged recursively following the same principles. +func (t *teeConfig) AddExtraConfigPaths(ins []string) error { + err1 := t.baseline.AddExtraConfigPaths(ins) + err2 := t.compare.AddExtraConfigPaths(ins) + if err1 != nil { + return err1 + } + if err2 != nil { + return err2 + } + return nil +} + +// SetConfigName wraps Viper for concurrent access +func (t *teeConfig) SetConfigName(in string) { + t.baseline.SetConfigName(in) + t.compare.SetConfigName(in) +} + +// SetConfigFile wraps Viper for concurrent access +func (t *teeConfig) SetConfigFile(in string) { + t.baseline.SetConfigFile(in) + t.compare.SetConfigFile(in) +} + +// SetConfigType wraps Viper for concurrent access +func (t *teeConfig) SetConfigType(in string) { + t.baseline.SetConfigType(in) + t.compare.SetConfigType(in) +} + +// ConfigFileUsed wraps Viper for concurrent access +func (t *teeConfig) ConfigFileUsed() string { + return t.baseline.ConfigFileUsed() +} + +//func (t *teeConfig) SetTypeByDefaultValue(in bool) { +// t.baseline.SetTypeByDefaultValue(in) +// t.compare.SetTypeByDefaultValue(in) +//} + +// GetEnvVars implements the Config interface +func (t *teeConfig) GetEnvVars() []string { + return t.baseline.GetEnvVars() +} + +// BindEnvAndSetDefault implements the Config interface +func (t *teeConfig) BindEnvAndSetDefault(key string, val interface{}, env ...string) { + t.baseline.BindEnvAndSetDefault(key, val, env...) + t.compare.BindEnvAndSetDefault(key, val, env...) +} + +func (t *teeConfig) Warnings() *model.Warnings { + return nil +} + +func (t *teeConfig) Object() model.Reader { + return t.baseline +} + +// CopyConfig copies the given config to the receiver config. This should only be used in tests as replacing +// the global config reference is unsafe. 
+func (t *teeConfig) CopyConfig(cfg model.Config) { + t.baseline.CopyConfig(cfg) + t.compare.CopyConfig(cfg) +} + +func (t *teeConfig) GetProxies() *model.Proxy { + return t.baseline.GetProxies() +} + +func (t *teeConfig) ExtraConfigFilesUsed() []string { + return t.baseline.ExtraConfigFilesUsed() +} diff --git a/pkg/config/utils/go.mod b/pkg/config/utils/go.mod index 8b493209622ae..1369a2556752d 100644 --- a/pkg/config/utils/go.mod +++ b/pkg/config/utils/go.mod @@ -13,7 +13,9 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../env github.com/DataDog/datadog-agent/pkg/config/mock => ../mock github.com/DataDog/datadog-agent/pkg/config/model => ../model/ + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../setup/ + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/telemetry => ../../telemetry github.com/DataDog/datadog-agent/pkg/util/executable => ../../util/executable github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../util/filesystem @@ -44,6 +46,8 @@ require ( require ( github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect @@ -70,7 +74,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -78,12 +82,12 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/config/utils/go.sum b/pkg/config/utils/go.sum index 77ba213060c82..765bdc23a7bf4 100644 --- a/pkg/config/utils/go.sum +++ b/pkg/config/utils/go.sum @@ -180,8 +180,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 
h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -251,15 +252,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -295,8 +296,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -312,8 +313,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn 
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/pkg/databasemonitoring/config/config.go b/pkg/databasemonitoring/config/config.go index b118928a533f6..ef6915b945cfb 100644 --- a/pkg/databasemonitoring/config/config.go +++ b/pkg/databasemonitoring/config/config.go @@ -6,7 +6,7 @@ // Package config contains database-monitoring auto-discovery configuration package config -import coreconfig "github.com/DataDog/datadog-agent/pkg/config" +import pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" const ( autoDiscoveryAuroraConfigKey = "database_monitoring.autodiscovery.aurora" @@ -30,10 +30,10 @@ type AuroraConfig struct { func NewAuroraAutodiscoveryConfig() (AuroraConfig, error) { var discoveryConfigs AuroraConfig // defaults for all values are set in the config package - discoveryConfigs.Enabled = coreconfig.Datadog().GetBool(autoDiscoveryAuroraConfigKey + ".enabled") - discoveryConfigs.QueryTimeout = coreconfig.Datadog().GetInt(autoDiscoveryAuroraConfigKey + ".query_timeout") - discoveryConfigs.DiscoveryInterval = coreconfig.Datadog().GetInt(autoDiscoveryAuroraConfigKey + ".discovery_interval") - discoveryConfigs.Tags = coreconfig.Datadog().GetStringSlice(autoDiscoveryAuroraConfigKey + ".tags") - discoveryConfigs.Region = coreconfig.Datadog().GetString(autoDiscoveryAuroraConfigKey + ".region") + discoveryConfigs.Enabled = pkgconfigsetup.Datadog().GetBool(autoDiscoveryAuroraConfigKey + ".enabled") + discoveryConfigs.QueryTimeout = pkgconfigsetup.Datadog().GetInt(autoDiscoveryAuroraConfigKey + ".query_timeout") + discoveryConfigs.DiscoveryInterval = pkgconfigsetup.Datadog().GetInt(autoDiscoveryAuroraConfigKey + ".discovery_interval") + discoveryConfigs.Tags = pkgconfigsetup.Datadog().GetStringSlice(autoDiscoveryAuroraConfigKey + ".tags") + discoveryConfigs.Region = pkgconfigsetup.Datadog().GetString(autoDiscoveryAuroraConfigKey + ".region") return discoveryConfigs, nil } diff --git a/pkg/diagnose/check.go b/pkg/diagnose/check.go index 41cd48bf4718c..17f2e0d016b17 100644 --- a/pkg/diagnose/check.go +++ b/pkg/diagnose/check.go @@ -19,7 +19,7 @@ import ( integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def" pkgcollector "github.com/DataDog/datadog-agent/pkg/collector" "github.com/DataDog/datadog-agent/pkg/collector/check" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -104,7 +104,7 @@ func 
diagnoseChecksInCLIProcess(_ diagnosis.Config, senderManager diagnosesender } } // Initializing the aggregator with a flush interval of 0 (to disable the flush goroutines) - common.LoadComponents(secretResolver, wmetaInstance, ac, pkgconfig.Datadog().GetString("confd_path")) + common.LoadComponents(secretResolver, wmetaInstance, ac, pkgconfigsetup.Datadog().GetString("confd_path")) ac.LoadAndRun(context.Background()) // Create the CheckScheduler, but do not attach it to diff --git a/pkg/diagnose/connectivity/core_endpoint.go b/pkg/diagnose/connectivity/core_endpoint.go index ffc1ecf9e52a5..81e1ea946c5d9 100644 --- a/pkg/diagnose/connectivity/core_endpoint.go +++ b/pkg/diagnose/connectivity/core_endpoint.go @@ -21,7 +21,7 @@ import ( forwarder "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/resolver" logsConfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" logshttp "github.com/DataDog/datadog-agent/pkg/logs/client/http" @@ -29,7 +29,7 @@ import ( ) func getLogsHTTPEndpoints() (*logsConfig.Endpoints, error) { - datadogConfig := config.Datadog() + datadogConfig := pkgconfigsetup.Datadog() logsConfigKey := logsConfig.NewLogsConfigKeys("logs_config.", datadogConfig) return logsConfig.BuildHTTPEndpointsWithConfig(datadogConfig, logsConfigKey, "agent-http-intake.logs.", "logs", logsConfig.AgentJSONIntakeProtocol, logsConfig.DefaultIntakeOrigin) } @@ -38,7 +38,7 @@ func getLogsHTTPEndpoints() (*logsConfig.Endpoints, error) { func Diagnose(diagCfg diagnosis.Config) []diagnosis.Diagnosis { // Create domain resolvers - keysPerDomain, err := utils.GetMultipleEndpoints(config.Datadog()) + keysPerDomain, err := utils.GetMultipleEndpoints(pkgconfigsetup.Datadog()) if err != nil { return []diagnosis.Diagnosis{ { @@ -53,10 +53,10 @@ func Diagnose(diagCfg diagnosis.Config) []diagnosis.Diagnosis { var diagnoses []diagnosis.Diagnosis domainResolvers := resolver.NewSingleDomainResolvers(keysPerDomain) - client := forwarder.NewHTTPClient(config.Datadog()) + client := forwarder.NewHTTPClient(pkgconfigsetup.Datadog()) // Create diagnosis for logs - if config.Datadog().GetBool("logs_enabled") { + if pkgconfigsetup.Datadog().GetBool("logs_enabled") { endpoints, err := getLogsHTTPEndpoints() if err != nil { @@ -68,7 +68,7 @@ func Diagnose(diagCfg diagnosis.Config) []diagnosis.Diagnosis { RawError: err.Error(), }) } else { - url, err := logshttp.CheckConnectivityDiagnose(endpoints.Main, config.Datadog()) + url, err := logshttp.CheckConnectivityDiagnose(endpoints.Main, pkgconfigsetup.Datadog()) name := fmt.Sprintf("Connectivity to %s", url) diag := createDiagnosis(name, url, "", err) @@ -78,7 +78,7 @@ func Diagnose(diagCfg diagnosis.Config) []diagnosis.Diagnosis { } - endpointsInfo := getEndpointsInfo(config.Datadog()) + endpointsInfo := getEndpointsInfo(pkgconfigsetup.Datadog()) // Send requests to all endpoints for all domains for _, domainResolver := range domainResolvers { @@ -222,7 +222,7 @@ func verifyEndpointResponse(diagCfg diagnosis.Config, statusCode int, responseBo // the endpoint send an empty response. As the error 'EOF' is not very informative, it can // be interesting to 'wrap' this error to display more context. 
func noResponseHints(err error) string { - endpoint := utils.GetInfraEndpoint(config.Datadog()) + endpoint := utils.GetInfraEndpoint(pkgconfigsetup.Datadog()) parsedURL, parseErr := url.Parse(endpoint) if parseErr != nil { return fmt.Sprintf("Could not parse url '%v' : %v", scrubber.ScrubLine(endpoint), scrubber.ScrubLine(parseErr.Error())) diff --git a/pkg/diagnose/connectivity/core_endpoint_test.go b/pkg/diagnose/connectivity/core_endpoint_test.go index 0998f5db74992..028f833277388 100644 --- a/pkg/diagnose/connectivity/core_endpoint_test.go +++ b/pkg/diagnose/connectivity/core_endpoint_test.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/endpoints" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) var ( @@ -46,7 +46,7 @@ func TestSendHTTPRequestToEndpoint(t *testing.T) { })) defer ts1.Close() - client := defaultforwarder.NewHTTPClient(config.Datadog()) + client := defaultforwarder.NewHTTPClient(pkgconfigsetup.Datadog()) // With the correct API Key, it should be a 200 statusCodeWithKey, responseBodyWithKey, _, errWithKey := sendHTTPRequestToEndpoint(context.Background(), client, ts1.URL, endpointInfoTest, apiKey1) diff --git a/pkg/diagnose/connectivity/endpoint_info.go b/pkg/diagnose/connectivity/endpoint_info.go index c90294d30cd19..5c6bf3d245bc9 100644 --- a/pkg/diagnose/connectivity/endpoint_info.go +++ b/pkg/diagnose/connectivity/endpoint_info.go @@ -13,7 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/flare/helpers" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/endpoints" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/transaction" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) // endpointInfo is a value object that contains all the information we need to @@ -31,7 +31,7 @@ type endpointInfo struct { Payload []byte } -func getEndpointsInfo(cfg config.Reader) []endpointInfo { +func getEndpointsInfo(cfg model.Reader) []endpointInfo { emptyPayload := []byte("{}") checkRunPayload := []byte("{\"check\": \"test\", \"status\": 0}") diff --git a/pkg/diagnose/ports/ports.go b/pkg/diagnose/ports/ports.go index c53a7bd18d449..17db62bf8f801 100644 --- a/pkg/diagnose/ports/ports.go +++ b/pkg/diagnose/ports/ports.go @@ -11,7 +11,7 @@ import ( "path" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/util/port" ) @@ -39,14 +39,14 @@ func DiagnosePortSuite() []diagnosis.Diagnosis { } var diagnoses []diagnosis.Diagnosis - for _, key := range config.Datadog().AllKeysLowercased() { + for _, key := range pkgconfigsetup.Datadog().AllKeysLowercased() { splitKey := strings.Split(key, ".") keyName := splitKey[len(splitKey)-1] if keyName != "port" && !strings.HasPrefix(keyName, "port_") && !strings.HasSuffix(keyName, "_port") { continue } - value := config.Datadog().GetInt(key) + value := pkgconfigsetup.Datadog().GetInt(key) if value <= 0 { continue } diff --git a/pkg/diagnose/runner.go b/pkg/diagnose/runner.go index eca913f089f9f..281860310689a 100644 --- a/pkg/diagnose/runner.go +++ b/pkg/diagnose/runner.go @@ -27,7 +27,7 @@ import ( "github.com/fatih/color" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/diagnose/connectivity" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/diagnose/ports" @@ -272,19 +272,19 @@ func getDiagnosesFromCurrentProcess(diagCfg diagnosis.Config, suites []diagnosis func requestDiagnosesFromAgentProcess(diagCfg diagnosis.Config) (*diagnosis.DiagnoseResult, error) { // Get client to Agent's RPC call c := util.GetClient(false) - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, fmt.Errorf("error getting IPC address for the agent: %w", err) } // Make sure we have a session token (for privileged information) - if err = util.SetAuthToken(pkgconfig.Datadog()); err != nil { + if err = util.SetAuthToken(pkgconfigsetup.Datadog()); err != nil { return nil, fmt.Errorf("auth error: %w", err) } // Form call end-point //nolint:revive // TODO(CINT) Fix revive linter - diagnoseURL := fmt.Sprintf("https://%v:%v/agent/diagnose", ipcAddress, pkgconfig.Datadog().GetInt("cmd_port")) + diagnoseURL := fmt.Sprintf("https://%v:%v/agent/diagnose", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")) // Serialized diag config to pass it to Agent execution context var cfgSer []byte diff --git a/pkg/dynamicinstrumentation/codegen/c/dynamicinstrumentation.c b/pkg/dynamicinstrumentation/codegen/c/dynamicinstrumentation.c new file mode 100644 index 0000000000000..f3c17c3dd5ca3 --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/c/dynamicinstrumentation.c @@ -0,0 +1,99 @@ +#include "bpf_helpers.h" +#include "bpf_tracing.h" +#include "kconfig.h" +#include +#include "types.h" + +#define MAX_STRING_SIZE {{ .InstrumentationInfo.InstrumentationOptions.StringMaxSize}} +#define PARAM_BUFFER_SIZE {{ .InstrumentationInfo.InstrumentationOptions.ArgumentsMaxSize}} +#define STACK_DEPTH_LIMIT 10 +#define MAX_SLICE_SIZE 1800 +#define MAX_SLICE_LENGTH 20 + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 1 << 24); +} events SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(char[PARAM_BUFFER_SIZE])); + __uint(max_entries, 1); +} zeroval SEC(".maps"); + +struct event { + struct base_event base; + char output[PARAM_BUFFER_SIZE]; +}; + +SEC("uprobe/{{.GetBPFFuncName}}") +int {{.GetBPFFuncName}}(struct pt_regs *ctx) +{ + bpf_printk("{{.GetBPFFuncName}} probe in {{.ServiceName}} has triggered"); + + // reserve space on ringbuffer + struct event *event; + event = bpf_ringbuf_reserve(&events, sizeof(struct event), 0); + if (!event) { + bpf_printk("No space available on ringbuffer, dropping event"); + return 0; + } + + char* zero_string; + __u32 key = 0; + zero_string = bpf_map_lookup_elem(&zeroval, &key); + if (!zero_string) { + bpf_printk("couldn't lookup zero value in zeroval array map, dropping event for {{.GetBPFFuncName}}"); + bpf_ringbuf_discard(event, 0); + return 0; + } + + bpf_probe_read(&event->base.probe_id, sizeof(event->base.probe_id), zero_string); + bpf_probe_read(&event->base.program_counters, sizeof(event->base.program_counters), zero_string); + bpf_probe_read(&event->output, sizeof(event->output), zero_string); + bpf_probe_read(&event->base.probe_id, {{ .ID | len }}, "{{.ID}}"); + + // Get tid and tgid + u64 pidtgid = bpf_get_current_pid_tgid(); + u32 tgid = pidtgid >> 32; + event->base.pid = tgid; + + u64 uidgid = bpf_get_current_uid_gid(); + u32 uid = uidgid >> 32; + 
event->base.uid = uid; + + // Collect stack trace + __u64 currentPC = ctx->pc; + bpf_probe_read(&event->base.program_counters[0], sizeof(__u64), &currentPC); + + __u64 bp = ctx->regs[29]; + bpf_probe_read(&bp, sizeof(__u64), (void*)bp); // dereference bp to get current stack frame + __u64 ret_addr = ctx->regs[30]; // when bpf prog enters, the return address hasn't yet been written to the stack + + int i; + for (i = 1; i < STACK_DEPTH_LIMIT; i++) + { + if (bp == 0) { + break; + } + bpf_probe_read(&event->base.program_counters[i], sizeof(__u64), &ret_addr); + bpf_probe_read(&ret_addr, sizeof(__u64), (void*)(bp-8)); + bpf_probe_read(&bp, sizeof(__u64), (void*)bp); + } + + // Collect parameters + __u8 param_type; + __u16 param_size; + __u16 slice_length; + + int outputOffset = 0; + + {{ .InstrumentationInfo.BPFParametersSourceCode }} + + bpf_ringbuf_submit(event, 0); + + return 0; +} + +char __license[] SEC("license") = "GPL"; diff --git a/pkg/dynamicinstrumentation/codegen/c/types.h b/pkg/dynamicinstrumentation/codegen/c/types.h new file mode 100644 index 0000000000000..f170b91fe7541 --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/c/types.h @@ -0,0 +1,14 @@ +#ifndef DI_TYPES_H +#define DI_TYPES_H + +#include "ktypes.h" + +// NOTE: Be careful when adding fields, alignment should always be to 8 bytes +struct base_event { + char probe_id[304]; + __u32 pid; + __u32 uid; + __u64 program_counters[10]; +}__attribute__((aligned(8))); + +#endif diff --git a/pkg/dynamicinstrumentation/codegen/codegen.go b/pkg/dynamicinstrumentation/codegen/codegen.go new file mode 100644 index 0000000000000..6c3e7b44905cf --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/codegen.go @@ -0,0 +1,232 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +// Package codegen is used to generate bpf program source code based on probe definitions +package codegen + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strings" + "text/template" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" +) + +// GenerateBPFParamsCode generates the source code associated with the probe and data +// in its associated process info.
+func GenerateBPFParamsCode(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) error { + parameterBytes := []byte{} + out := bytes.NewBuffer(parameterBytes) + + if probe.InstrumentationInfo.InstrumentationOptions.CaptureParameters { + params := applyCaptureDepth(procInfo.TypeMap.Functions[probe.FuncName], probe.InstrumentationInfo.InstrumentationOptions.MaxReferenceDepth) + applyFieldCountLimit(params) + for i := range params { + flattenedParams := flattenParameters([]ditypes.Parameter{params[i]}) + + err := generateHeadersText(flattenedParams, out) + if err != nil { + return err + } + + err = generateParametersText(flattenedParams, out) + if err != nil { + return err + } + } + } else { + log.Info("Not capturing parameters") + } + + probe.InstrumentationInfo.BPFParametersSourceCode = out.String() + return nil +} + +func resolveHeaderTemplate(param *ditypes.Parameter) (*template.Template, error) { + switch param.Kind { + case uint(reflect.String): + if param.Location.InReg { + return template.New("string_reg_header_template").Parse(stringRegisterHeaderTemplateText) + } + return template.New("string_stack_header_template").Parse(stringStackHeaderTemplateText) + case uint(reflect.Slice): + if param.Location.InReg { + return template.New("slice_reg_header_template").Parse(sliceRegisterHeaderTemplateText) + } + return template.New("slice_stack_header_template").Parse(sliceStackHeaderTemplateText) + default: + return template.New("header_template").Parse(headerTemplateText) + } +} + +func generateHeadersText(params []ditypes.Parameter, out io.Writer) error { + for i := range params { + err := generateHeaderText(params[i], out) + if err != nil { + return err + } + } + return nil +} + +func generateHeaderText(param ditypes.Parameter, out io.Writer) error { + if reflect.Kind(param.Kind) == reflect.Slice { + return generateSliceHeader(&param, out) + } + + tmplt, err := resolveHeaderTemplate(&param) + if err != nil { + return err + } + err = tmplt.Execute(out, param) + if err != nil { + return err + } + return nil +} + +func generateParametersText(params []ditypes.Parameter, out io.Writer) error { + for i := range params { + err := generateParameterText(&params[i], out) + if err != nil { + return err + } + } + return nil +} + +func generateParameterText(param *ditypes.Parameter, out io.Writer) error { + + if param.Kind == uint(reflect.Array) || + param.Kind == uint(reflect.Struct) || + param.Kind == uint(reflect.Pointer) { + // - Arrays/structs don't have actual values, we just want to generate + // a header for them for the sake of event parsing. + // - Pointers do have actual values, but they're captured when the + // underlying value is also captured.
+ return nil + } + + template, err := resolveParameterTemplate(param) + if err != nil { + return err + } + param.Type = cleanupTypeName(param.Type) + err = template.Execute(out, param) + if err != nil { + return fmt.Errorf("could not execute template for generating read of parameter: %w", err) + } + + return nil +} + +func resolveParameterTemplate(param *ditypes.Parameter) (*template.Template, error) { + if param.Type == "main.triggerVerifierErrorForTesting" { + return template.New("trigger_verifier_error_template").Parse(forcedVerifierErrorTemplate) + } + notSupported := param.NotCaptureReason == ditypes.Unsupported + cutForFieldLimit := param.NotCaptureReason == ditypes.FieldLimitReached + + if notSupported { + return template.New("unsupported_type_template").Parse(unsupportedTypeTemplateText) + } else if cutForFieldLimit { + return template.New("cut_field_limit_template").Parse(cutForFieldLimitTemplateText) + } + + if param.Location.InReg { + return resolveRegisterParameterTemplate(param) + } + return resolveStackParameterTemplate(param) +} + +func resolveRegisterParameterTemplate(param *ditypes.Parameter) (*template.Template, error) { + needsDereference := param.Location.NeedsDereference + stringType := param.Kind == uint(reflect.String) + sliceType := param.Kind == uint(reflect.Slice) + + if needsDereference { + // Register Pointer + return template.New("pointer_register_template").Parse(pointerRegisterTemplateText) + } else if stringType { + // Register String + return template.New("string_register_template").Parse(stringRegisterTemplateText) + } else if sliceType { + // Register Slice + return template.New("slice_register_template").Parse(sliceRegisterTemplateText) + } else if !needsDereference { + // Register Normal Value + return template.New("register_template").Parse(normalValueRegisterTemplateText) + } + return nil, errors.New("no template created: invalid or unsupported type") +} + +func resolveStackParameterTemplate(param *ditypes.Parameter) (*template.Template, error) { + needsDereference := param.Location.NeedsDereference + stringType := param.Kind == uint(reflect.String) + sliceType := param.Kind == uint(reflect.Slice) + + if needsDereference { + // Stack Pointer + return template.New("pointer_stack_template").Parse(pointerStackTemplateText) + } else if stringType { + // Stack String + return template.New("string_stack_template").Parse(stringStackTemplateText) + } else if sliceType { + // Stack Slice + return template.New("slice_stack_template").Parse(sliceStackTemplateText) + } else if !needsDereference { + // Stack Normal Value + return template.New("stack_template").Parse(normalValueStackTemplateText) + } + return nil, errors.New("no template created: invalid or unsupported type") +} + +func cleanupTypeName(s string) string { + return strings.TrimPrefix(s, "*") +} + +func generateSliceHeader(slice *ditypes.Parameter, out io.Writer) error { + if slice == nil { + return errors.New("nil slice parameter when generating header code") + } + if len(slice.ParameterPieces) != 1 { + return errors.New("invalid slice parameter when generating header code") + } + + x := []byte{} + buf := bytes.NewBuffer(x) + err := generateHeaderText(slice.ParameterPieces[0], buf) + if err != nil { + return err + } + w := sliceHeaderWrapper{ + Parameter: slice, + SliceTypeHeaderText: buf.String(), + } + + sliceTemplate, err := resolveHeaderTemplate(slice) + if err != nil { + return err + } + + err = sliceTemplate.Execute(out, w) + if err != nil { + return fmt.Errorf("could not execute template for 
generating slice header: %w", err) + } + return nil +} + +type sliceHeaderWrapper struct { + Parameter *ditypes.Parameter + SliceTypeHeaderText string +} diff --git a/pkg/dynamicinstrumentation/codegen/compile.go b/pkg/dynamicinstrumentation/codegen/compile.go new file mode 100644 index 0000000000000..abb8523280443 --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/compile.go @@ -0,0 +1,11 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package codegen + +//go:generate $GOPATH/bin/include_headers pkg/dynamicinstrumentation/codegen/c/dynamicinstrumentation.c pkg/ebpf/bytecode/build/runtime/dynamicinstrumentation.c pkg/ebpf/c +//go:generate $GOPATH/bin/integrity pkg/ebpf/bytecode/build/runtime/dynamicinstrumentation.c pkg/ebpf/bytecode/runtime/dynamicinstrumentation.go runtime diff --git a/pkg/dynamicinstrumentation/codegen/output_offsets.go b/pkg/dynamicinstrumentation/codegen/output_offsets.go new file mode 100644 index 0000000000000..56250b25897c2 --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/output_offsets.go @@ -0,0 +1,138 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package codegen + +import ( + "math/rand" + "reflect" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" +) + +type paramDepthCounter struct { + depth int + param *ditypes.Parameter +} + +func applyCaptureDepth(params []ditypes.Parameter, maxDepth int) []ditypes.Parameter { + log.Tracef("Applying capture depth: %d", maxDepth) + queue := []paramDepthCounter{} + + for i := range params { + queue = append(queue, paramDepthCounter{ + depth: 0, + param: &params[i], + }) + } + + for len(queue) != 0 { + front := queue[0] + queue = queue[1:] + + if front.depth == maxDepth { + // max capture depth reached, remove parameters below this level. + front.param.ParameterPieces = []ditypes.Parameter{} + if front.param.Kind == uint(reflect.Struct) { + // struct size reflects the number of fields, + // setting to 0 tells the user space parsing not to + // expect anything else. + front.param.TotalSize = 0 + } + } else { + for i := range front.param.ParameterPieces { + queue = append(queue, paramDepthCounter{ + depth: front.depth + 1, + param: &front.param.ParameterPieces[i], + }) + } + } + } + return params +} + +func flattenParameters(params []ditypes.Parameter) []ditypes.Parameter { + flattenedParams := []ditypes.Parameter{} + for i := range params { + kind := reflect.Kind(params[i].Kind) + if kind == reflect.Slice { + // Slices don't get flattened as we need the underlying type. + // We populate the slice's template using that type. + flattenedParams = append(flattenedParams, params[i]) + } else if hasHeader(kind) { + paramHeader := params[i] + paramHeader.ParameterPieces = nil + flattenedParams = append(flattenedParams, paramHeader) + flattenedParams = append(flattenedParams, flattenParameters(params[i].ParameterPieces)...) + } else if len(params[i].ParameterPieces) > 0 { + flattenedParams = append(flattenedParams, flattenParameters(params[i].ParameterPieces)...)
+ } else { + flattenedParams = append(flattenedParams, params[i]) + } + } + + for i := range flattenedParams { + flattenedParams[i].ID = randomID() + } + + return flattenedParams +} + +func applyFieldCountLimit(params []ditypes.Parameter) { + queue := []*ditypes.Parameter{} + for i := range params { + queue = append(queue, &params[len(params)-1-i]) + } + var ( + current *ditypes.Parameter + max int + ) + for len(queue) != 0 { + current = queue[0] + queue = queue[1:] + + max = len(current.ParameterPieces) + if len(current.ParameterPieces) > ditypes.MaxFieldCount { + max = ditypes.MaxFieldCount + for j := max; j < len(current.ParameterPieces); j++ { + excludeForFieldCount(&current.ParameterPieces[j]) + } + } + for n := 0; n < max; n++ { + queue = append(queue, &current.ParameterPieces[n]) + } + } +} + +func excludeForFieldCount(root *ditypes.Parameter) { + // Exclude all in this tree + if root == nil { + return + } + root.NotCaptureReason = ditypes.FieldLimitReached + root.Kind = ditypes.KindCutFieldLimit + for i := range root.ParameterPieces { + excludeForFieldCount(&root.ParameterPieces[i]) + } +} + +func hasHeader(kind reflect.Kind) bool { + return kind == reflect.Struct || + kind == reflect.Array || + kind == reflect.Pointer +} + +func randomID() string { + length := 6 + randomString := make([]byte, length) + for i := 0; i < length; i++ { + randomString[i] = byte(65 + rand.Intn(25)) + } + return string(randomString) +} diff --git a/pkg/dynamicinstrumentation/codegen/templates.go b/pkg/dynamicinstrumentation/codegen/templates.go new file mode 100644 index 0000000000000..64f5dab18ec3a --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/templates.go @@ -0,0 +1,210 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc.
+ +//go:build linux_bpf + +package codegen + +var forcedVerifierErrorTemplate = ` +int illegalDereference = *(*(*ctx->regs[0])); +` + +var headerTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Write the kind and size to output buffer +param_type = {{.Kind}}; +bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type); +param_size = {{.TotalSize}}; +bpf_probe_read(&event->output[outputOffset+1], sizeof(param_size), &param_size); +outputOffset += 3; +` + +// The length of slices aren't known until parsing, so they require +// special headers to read in the length dynamically +var sliceRegisterHeaderTemplateText = ` +// Name={{.Parameter.Name}} ID={{.Parameter.ID}} TotalSize={{.Parameter.TotalSize}} Kind={{.Parameter.Kind}} +// Write the slice kind to output buffer +param_type = {{.Parameter.Kind}}; +bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type); +// Read slice length and write it to output buffer +bpf_probe_read(&param_size, sizeof(param_size), &ctx->regs[{{.Parameter.Location.Register}}+1]); +bpf_probe_read(&event->output[outputOffset+1], sizeof(param_size), &param_size); +outputOffset += 3; + +__u16 indexSlice{{.Parameter.ID}}; +slice_length = param_size; +if (slice_length > MAX_SLICE_LENGTH) { + slice_length = MAX_SLICE_LENGTH; +} + +for (indexSlice{{.Parameter.ID}} = 0; indexSlice{{.Parameter.ID}} < MAX_SLICE_LENGTH; indexSlice{{.Parameter.ID}}++) { + if (indexSlice{{.Parameter.ID}} >= slice_length) { + break; + } + {{.SliceTypeHeaderText}} +} +` + +// The length of slices aren't known until parsing, so they require +// special headers to read in the length dynamically +var sliceStackHeaderTemplateText = ` +// Name={{.Parameter.Name}} ID={{.Parameter.ID}} TotalSize={{.Parameter.TotalSize}} Kind={{.Parameter.Kind}} +// Write the slice kind to output buffer +param_type = {{.Parameter.Kind}}; +bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type); +// Read slice length and write it to output buffer +bpf_probe_read(&param_size, sizeof(param_size), &ctx->regs[29]+{{.Parameter.Location.StackOffset}}+8]); +bpf_probe_read(&event->output[outputOffset+1], sizeof(param_size), &param_size); +outputOffset += 3; + +__u16 indexSlice{{.Parameter.ID}}; +slice_length = param_size; +if (slice_length > MAX_SLICE_LENGTH) { + slice_length = MAX_SLICE_LENGTH; +} + +for (indexSlice{{.Parameter.ID}} = 0; indexSlice{{.Parameter.ID}} < MAX_SLICE_LENGTH; indexSlice{{.Parameter.ID}}++) { + if (indexSlice{{.Parameter.ID}} >= slice_length) { + break; + } + {{.SliceTypeHeaderText}} +} +` + +// The length of strings aren't known until parsing, so they require +// special headers to read in the length dynamically +var stringRegisterHeaderTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Write the string kind to output buffer +param_type = {{.Kind}}; +bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type); + +// Read string length and write it to output buffer +bpf_probe_read(&param_size, sizeof(param_size), &ctx->regs[{{.Location.Register}}+1]); + +// Limit string length +__u16 string_size_{{.ID}} = param_size; +if (string_size_{{.ID}} > MAX_STRING_SIZE) { + string_size_{{.ID}} = MAX_STRING_SIZE; +} +bpf_probe_read(&event->output[outputOffset+1], sizeof(string_size_{{.ID}}), &string_size_{{.ID}}); +outputOffset += 3; +` + +// The length of strings aren't known until parsing, so they require +// special headers to read in the length dynamically +var stringStackHeaderTemplateText = `
+// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Write the string kind to output buffer +param_type = {{.Kind}}; +bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type); +// Read string length and write it to output buffer +bpf_probe_read(&param_size, sizeof(param_size), (char*)((ctx->regs[29])+{{.Location.StackOffset}}+8)); +// Limit string length +__u16 string_size_{{.ID}} = param_size; +if (string_size_{{.ID}} > MAX_STRING_SIZE) { + string_size_{{.ID}} = MAX_STRING_SIZE; +} +bpf_probe_read(&event->output[outputOffset+1], sizeof(string_size_{{.ID}}), &string_size_{{.ID}}); +outputOffset += 3; +` + +var sliceRegisterTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Read contents of slice +bpf_probe_read(&event->output[outputOffset], MAX_SLICE_SIZE, (void*)ctx->regs[{{.Location.Register}}]); +outputOffset += MAX_SLICE_SIZE; +` + +var sliceStackTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Read contents of slice +bpf_probe_read(&event->output[outputOffset], MAX_SLICE_SIZE, (void*)(ctx->regs[29]+{{.Location.StackOffset}}); +outputOffset += MAX_SLICE_SIZE;` + +var stringRegisterTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Read string length and write it to output buffer +bpf_probe_read(&param_size, sizeof(param_size), &ctx->regs[{{.Location.Register}}+1]); + +__u16 string_size_read_{{.ID}} = param_size; +if (string_size_read_{{.ID}} > MAX_STRING_SIZE) { + string_size_read_{{.ID}} = MAX_STRING_SIZE; +} + +// Read contents of string +bpf_probe_read(&event->output[outputOffset], string_size_read_{{.ID}}, (void*)ctx->regs[{{.Location.Register}}]); +outputOffset += string_size_read_{{.ID}}; +` + +var stringStackTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Read string length and write it to output buffer +bpf_probe_read(&param_size, sizeof(param_size), (char*)((ctx->regs[29])+{{.Location.StackOffset}}+8)); +// Limit string length +__u16 string_size_read_{{.ID}} = param_size; +if (string_size_read_{{.ID}} > MAX_STRING_SIZE) { + string_size_read_{{.ID}} = MAX_STRING_SIZE; +} +// Read contents of string +bpf_probe_read(&ret_addr, sizeof(__u64), (void*)(ctx->regs[29]+{{.Location.StackOffset}})); +bpf_probe_read(&event->output[outputOffset], string_size_read_{{.ID}}, (void*)(ret_addr)); +outputOffset += string_size_read_{{.ID}}; +` + +var pointerRegisterTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Read the pointer value (address of underlying value) +void *ptrTo{{.ID}}; +bpf_probe_read(&ptrTo{{.ID}}, sizeof(ptrTo{{.ID}}), &ctx->regs[{{.Location.Register}}]); + +// Write the underlying value to output +bpf_probe_read(&event->output[outputOffset], {{.TotalSize}}, ptrTo{{.ID}}+{{.Location.PointerOffset}}); +outputOffset += {{.TotalSize}}; + +// Write the pointer address to output +ptrTo{{.ID}} += {{.Location.PointerOffset}}; +bpf_probe_read(&event->output[outputOffset], sizeof(ptrTo{{.ID}}), &ptrTo{{.ID}}); +` + +var pointerStackTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Read the pointer value (address of underlying value) +void *ptrTo{{.ID}}; +bpf_probe_read(&ptrTo{{.ID}}, sizeof(ptrTo{{.ID}}), (char*)((ctx->regs[29])+{{.Location.StackOffset}}+8)); + +// Write the underlying value to output +bpf_probe_read(&event->output[outputOffset], {{.TotalSize}}, ptrTo{{.ID}}+{{.Location.PointerOffset}}); +outputOffset
+= {{.TotalSize}}; + +// Write the pointer address to output +ptrTo{{.ID}} += {{.Location.PointerOffset}}; +bpf_probe_read(&event->output[outputOffset], sizeof(ptrTo{{.ID}}), &ptrTo{{.ID}}); +` + +var normalValueRegisterTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +bpf_probe_read(&event->output[outputOffset], {{.TotalSize}}, &ctx->regs[{{.Location.Register}}]); +outputOffset += {{.TotalSize}}; +` + +var normalValueStackTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// Read value for {{.Name}} +bpf_probe_read(&event->output[outputOffset], {{.TotalSize}}, (char*)((ctx->regs[29])+{{.Location.StackOffset}})); +outputOffset += {{.TotalSize}}; +` + +// Unsupported types just get a single `255` value to signify as a placeholder +// that an unsupported type goes here. Size is where we keep the actual type. +var unsupportedTypeTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// No capture, unsupported type +` + +var cutForFieldLimitTemplateText = ` +// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} +// No capture, cut for field limit +` diff --git a/pkg/dynamicinstrumentation/di.go b/pkg/dynamicinstrumentation/di.go new file mode 100644 index 0000000000000..409b8a1af5f7a --- /dev/null +++ b/pkg/dynamicinstrumentation/di.go @@ -0,0 +1,159 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +// Package dynamicinstrumentation provides the main entrypoint into running the +// dynamic instrumentation for Go product +package dynamicinstrumentation + +import ( + "encoding/json" + "fmt" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diagnostics" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diconfig" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ebpf" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/uploader" +) + +// GoDI is the central controller representation of the Dynamic Instrumentation +// implementation for Go services +type GoDI struct { + cm diconfig.ConfigManager + + lu uploader.LogUploader + du uploader.DiagnosticUploader + + processEvent ditypes.EventCallback + Close func() + + stats GoDIStats +} + +// GoDIStats is used to track various metrics relevant to the health of the +// Dynamic Instrumentation process +type GoDIStats struct { + PIDEventsCreatedCount map[uint32]uint64 // pid : count + ProbeEventsCreatedCount map[string]uint64 // probeID : count +} + +func newGoDIStats() GoDIStats { + return GoDIStats{ + PIDEventsCreatedCount: make(map[uint32]uint64), + ProbeEventsCreatedCount: make(map[string]uint64), + } +} + +// DIOptions is used to configure the running Dynamic Instrumentation process +type DIOptions struct { + Offline bool + + ProbesFilePath string + SnapshotOutput string + DiagnosticOutput string + + ditypes.EventCallback +} + +// RunDynamicInstrumentation is the main entry point into running the Dynamic +// Instrumentation project for Go. 
+func RunDynamicInstrumentation(opts *DIOptions) (*GoDI, error) { + var goDI *GoDI + + err := ebpf.SetupEventsMap() + if err != nil { + return nil, err + } + + if opts.Offline { + cm, err := diconfig.NewFileConfigManager(opts.ProbesFilePath) + if err != nil { + return nil, fmt.Errorf("couldn't create new file config manager: %w", err) + } + lu, err := uploader.NewOfflineLogSerializer(opts.SnapshotOutput) + if err != nil { + return nil, fmt.Errorf("couldn't create new offline log serializer: %w", err) + } + du, err := uploader.NewOfflineDiagnosticSerializer(diagnostics.Diagnostics, opts.DiagnosticOutput) + if err != nil { + return nil, fmt.Errorf("couldn't create new offline diagnostic serializer: %w", err) + } + goDI = &GoDI{ + cm: cm, + lu: lu, + du: du, + stats: newGoDIStats(), + } + } else { + cm, err := diconfig.NewRCConfigManager() + if err != nil { + return nil, fmt.Errorf("couldn't create new RC config manager: %w", err) + } + goDI = &GoDI{ + cm: cm, + lu: uploader.NewLogUploader(), + du: uploader.NewDiagnosticUploader(), + stats: newGoDIStats(), + } + } + if opts.EventCallback != nil { + goDI.processEvent = opts.EventCallback + } else { + goDI.processEvent = goDI.uploadSnapshot + } + + closeRingbuffer, err := goDI.startRingbufferConsumer() + if err != nil { + return nil, fmt.Errorf("couldn't set up new ringbuffer consumer: %w", err) + } + + goDI.Close = func() { + goDI.cm.Stop() + closeRingbuffer() + } + + return goDI, nil +} + +func (goDI *GoDI) printSnapshot(event *ditypes.DIEvent) { + if event == nil { + return + } + procInfo := goDI.cm.GetProcInfos()[event.PID] + diLog := uploader.NewDILog(procInfo, event) + + var bs []byte + var err error + + if diLog != nil { + bs, err = json.MarshalIndent(diLog, "", " ") + } else { + bs, err = json.MarshalIndent(event, "", " ") + } + + if err != nil { + log.Info(err) + } + log.Debug(string(bs)) +} + +func (goDI *GoDI) uploadSnapshot(event *ditypes.DIEvent) { + goDI.printSnapshot(event) + procInfo := goDI.cm.GetProcInfos()[event.PID] + diLog := uploader.NewDILog(procInfo, event) + if diLog != nil { + goDI.lu.Enqueue(diLog) + } +} + +// GetStats returns the maps of various statitics for +// runtime health of dynamic instrumentation +func (goDI *GoDI) GetStats() GoDIStats { + return goDI.stats +} diff --git a/pkg/dynamicinstrumentation/diagnostics/diagnostics.go b/pkg/dynamicinstrumentation/diagnostics/diagnostics.go new file mode 100644 index 0000000000000..c1e351297210d --- /dev/null +++ b/pkg/dynamicinstrumentation/diagnostics/diagnostics.go @@ -0,0 +1,82 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux_bpf + +// Package diagnostics provides a facility for dynamic instrumentation to upload diagnostic information +package diagnostics + +import ( + "sync" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" +) + +func newDIDiagnostic(service, runtimeID, probeID string, status ditypes.Status) *ditypes.DiagnosticUpload { + return &ditypes.DiagnosticUpload{ + Service: service, + DDSource: "dd_debugger", + Debugger: struct { + ditypes.Diagnostic `json:"diagnostics"` + }{ + Diagnostic: ditypes.Diagnostic{ + RuntimeID: runtimeID, + ProbeID: probeID, + Status: status, + }, + }, + } +} + +type probeInstanceID struct { + service string + runtimeID string + probeID string +} + +// DiagnosticManager is used to keep track and upload diagnostic information +type DiagnosticManager struct { + state map[probeInstanceID]*ditypes.DiagnosticUpload + Updates chan *ditypes.DiagnosticUpload + + mu sync.Mutex +} + +// NewDiagnosticManager creates a new DiagnosticManager +func NewDiagnosticManager() *DiagnosticManager { + return &DiagnosticManager{ + state: make(map[probeInstanceID]*ditypes.DiagnosticUpload), + Updates: make(chan *ditypes.DiagnosticUpload), + } +} + +// SetStatus associates the status with the specified service/probe +func (m *DiagnosticManager) SetStatus(service, runtimeID, probeID string, status ditypes.Status) { + id := probeInstanceID{service, probeID, runtimeID} + d := newDIDiagnostic(service, runtimeID, probeID, status) + m.update(id, d) +} + +// SetError associates the error with the specified service/probe +func (m *DiagnosticManager) SetError(service, runtimeID, probeID, errorType, errorMessage string) { + id := probeInstanceID{service, probeID, runtimeID} + d := newDIDiagnostic(service, runtimeID, probeID, ditypes.StatusError) + d.SetError(errorType, errorMessage) + m.update(id, d) +} + +func (m *DiagnosticManager) update(id probeInstanceID, d *ditypes.DiagnosticUpload) { + m.mu.Lock() + defer m.mu.Unlock() + + if m.state[id] != d { + m.state[id] = d + // TODO: if there is no consumer reading updates, this blocks the calling goroutine + m.Updates <- d + } +} + +// Diagnostics is a global instance of a diagnostic manager +var Diagnostics = NewDiagnosticManager() diff --git a/pkg/dynamicinstrumentation/diconfig/binary_inspection.go b/pkg/dynamicinstrumentation/diconfig/binary_inspection.go new file mode 100644 index 0000000000000..3a59dde426759 --- /dev/null +++ b/pkg/dynamicinstrumentation/diconfig/binary_inspection.go @@ -0,0 +1,269 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux_bpf + +package diconfig + +import ( + "debug/elf" + "fmt" + "reflect" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" +) + +// inspectGoBinaries goes through each service and populates information about the binary +// and the relevant parameters, and their types +// configEvent maps service names to info about the service and their configurations +func inspectGoBinaries(configEvent ditypes.DIProcs) error { + var err error + for i := range configEvent { + err = AnalyzeBinary(configEvent[i]) + if err != nil { + return fmt.Errorf("inspection of PID %d (path=%s) failed: %w", configEvent[i].PID, configEvent[i].BinaryPath, err) + } + } + return nil +} + +// AnalyzeBinary reads the binary associated with the specified process and parses +// the DWARF information. It populates relevant fields in the process representation +func AnalyzeBinary(procInfo *ditypes.ProcessInfo) error { + functions := []string{} + targetFunctions := map[string]bool{} + for _, probe := range procInfo.GetProbes() { + functions = append(functions, probe.FuncName) + targetFunctions[probe.FuncName] = true + } + + dwarfData, err := loadDWARF(procInfo.BinaryPath) + if err != nil { + return fmt.Errorf("could not retrieve debug information from binary: %w", err) + } + + typeMap, err := getTypeMap(dwarfData, targetFunctions) + if err != nil { + return fmt.Errorf("could not retrieve type information from binary %w", err) + } + + procInfo.TypeMap = typeMap + + elfFile, err := elf.Open(procInfo.BinaryPath) + if err != nil { + return fmt.Errorf("could not open elf file %w", err) + } + + procInfo.DwarfData = dwarfData + + fieldIDs := make([]bininspect.FieldIdentifier, 0) + for _, funcParams := range typeMap.Functions { + for _, param := range funcParams { + fieldIDs = append(fieldIDs, + collectFieldIDs(param)...) + } + } + + r, err := bininspect.InspectWithDWARF(elfFile, functions, fieldIDs) + if err != nil { + return fmt.Errorf("could not determine locations of variables from debug information %w", err) + } + + // Use the result from InspectWithDWARF to populate the locations of parameters + for functionName, functionMetadata := range r.Functions { + putLocationsInParams(functionMetadata.Parameters, r.StructOffsets, procInfo.TypeMap.Functions, functionName) + correctStructSizes(procInfo.TypeMap.Functions[functionName]) + } + + return nil +} + +// collectFieldIDs returns all struct fields if there are any amongst types of parameters +// including if there's structs that are nested deep within complex types +func collectFieldIDs(param ditypes.Parameter) []bininspect.FieldIdentifier { + fieldIDs := []bininspect.FieldIdentifier{} + stack := append([]ditypes.Parameter{param}, param.ParameterPieces...) + + for len(stack) != 0 { + + current := stack[len(stack)-1] + stack = stack[:len(stack)-1] + if !kindIsSupported(reflect.Kind(current.Kind)) { + continue + } + if len(current.ParameterPieces) != 0 { + stack = append(stack, current.ParameterPieces...) + } + + if current.Kind == uint(reflect.Struct) || current.Kind == uint(reflect.Slice) { + for _, structField := range current.ParameterPieces { + if structField.Name == "" || current.Type == "" { + // these can be blank in anonymous types or embedded fields + // of builtin types. bininspect has no ability to find offsets + // in these cases and we're best off skipping them. 
+ continue + } + fieldIDs = append(fieldIDs, bininspect.FieldIdentifier{ + StructName: current.Type, + FieldName: structField.Name, + }) + if len(fieldIDs) >= ditypes.MaxFieldCount { + log.Info("field limit applied, not collecting further fields", len(fieldIDs), ditypes.MaxFieldCount) + return fieldIDs + } + } + } + } + return fieldIDs +} + +func putLocationsInParams( + paramMetadatas []bininspect.ParameterMetadata, + fieldLocations map[bininspect.FieldIdentifier]uint64, + funcMap map[string][]ditypes.Parameter, + funcName string) { + + params := funcMap[funcName] + locations := []ditypes.Location{} + + // Collect locations in order + for _, param := range paramMetadatas { + for _, piece := range param.Pieces { + locations = append(locations, ditypes.Location{ + InReg: piece.InReg, + StackOffset: piece.StackOffset, + Register: piece.Register, + }) + } + } + + assignLocationsInOrder(params, locations) + correctTypeSpecificLocations(params, fieldLocations) + + funcMap[funcName] = params +} + +func assignLocationsInOrder(params []ditypes.Parameter, locations []ditypes.Location) { + stack := []*ditypes.Parameter{} + locationCounter := 0 + + // Start by pushing addresses of all parameters to stack + for i := range params { + stack = append(stack, &params[len(params)-1-i]) + } + + for { + if len(stack) == 0 || locationCounter == len(locations) { + return + } + current := stack[len(stack)-1] + stack = stack[:len(stack)-1] + if len(current.ParameterPieces) != 0 && + current.Kind != uint(reflect.Array) && + current.Kind != uint(reflect.Pointer) && + current.Kind != uint(reflect.Slice) { + + for i := range current.ParameterPieces { + stack = append(stack, &current.ParameterPieces[len(current.ParameterPieces)-1-i]) + } + } else { + // Location fields are directly assigned instead of setting the whole + // location field to preserve other fields + locationToAssign := locations[locationCounter] + current.Location.InReg = locationToAssign.InReg + current.Location.Register = locationToAssign.Register + current.Location.StackOffset = locationToAssign.StackOffset + + if reflect.Kind(current.Kind) == reflect.String { + // Strings actually have two locations (pointer, length) + // but are shortened to a single one for parsing. The missing + // location is taken into account in bpf code, but we need + // to make sure it's not assigned to something else here. + locationCounter++ + } else if reflect.Kind(current.Kind) == reflect.Slice { + // slices actually have three locations (array, length, capacity) + // but are shortened to a single one for parsing. The missing + // locations are taken into account in bpf code, but we need + // to make sure it's not assigned to something else here.
+ locationCounter += 2 + } + locationCounter++ + } + } +} + +func correctTypeSpecificLocations(params []ditypes.Parameter, fieldLocations map[bininspect.FieldIdentifier]uint64) { + for i := range params { + if params[i].Kind == uint(reflect.Array) { + correctArrayLocations(&params[i], fieldLocations) + } else if params[i].Kind == uint(reflect.Pointer) { + correctPointerLocations(&params[i], fieldLocations) + } else if params[i].Kind == uint(reflect.Struct) { + correctStructLocations(&params[i], fieldLocations) + } + } +} + +// correctStructLocations sets pointer and stack offsets for struct fields from +// bininspect results +func correctStructLocations(structParam *ditypes.Parameter, fieldLocations map[bininspect.FieldIdentifier]uint64) { + for i := range structParam.ParameterPieces { + fieldID := bininspect.FieldIdentifier{ + StructName: structParam.Type, + FieldName: structParam.ParameterPieces[i].Name, + } + offset, ok := fieldLocations[fieldID] + if !ok { + log.Infof("no field location available for %s.%s\n", fieldID.StructName, fieldID.FieldName) + structParam.ParameterPieces[i].NotCaptureReason = ditypes.NoFieldLocation + continue + } + + fieldLocationsHaveAlreadyBeenDirectlyAssigned := isLocationSet(structParam.ParameterPieces[i].Location) + if fieldLocationsHaveAlreadyBeenDirectlyAssigned { + // The location would be set if it was directly assigned to (i.e. has its own register instead of needing + // to dereference a pointer or get the element from a slice) + structParam.ParameterPieces[i].Location = structParam.Location + structParam.ParameterPieces[i].Location.StackOffset = int64(offset) + structParam.Location.StackOffset + } + + structParam.ParameterPieces[i].Location.PointerOffset = offset + structParam.ParameterPieces[i].Location.StackOffset = structParam.ParameterPieces[0].Location.StackOffset + int64(offset) + + correctTypeSpecificLocations([]ditypes.Parameter{structParam.ParameterPieces[i]}, fieldLocations) + } +} + +func isLocationSet(l ditypes.Location) bool { + return reflect.DeepEqual(l, ditypes.Location{}) +} + +// correctPointerLocations takes a parameter's location and copies it to the underlying +// type that's pointed to.
It sets `NeedsDereference` to true +// then calls the top level function on each element of the array to ensure all +// element's have corrected locations +func correctPointerLocations(pointerParam *ditypes.Parameter, fieldLocations map[bininspect.FieldIdentifier]uint64) { + // Pointers should have exactly one entry in ParameterPieces that correspond to the underlying type + if len(pointerParam.ParameterPieces) != 1 { + return + } + pointerParam.ParameterPieces[0].Location = pointerParam.Location + pointerParam.ParameterPieces[0].Location.NeedsDereference = true + correctTypeSpecificLocations([]ditypes.Parameter{pointerParam.ParameterPieces[0]}, fieldLocations) +} + +// correctArrayLocations takes a parameter's location, and distribute it to each element +// by using `stack offset + (size*index)` then calls the top level function on each element +// of the array to ensure all element's have corrected locations +func correctArrayLocations(arrayParam *ditypes.Parameter, fieldLocations map[bininspect.FieldIdentifier]uint64) { + initialOffset := arrayParam.Location.StackOffset + for i := range arrayParam.ParameterPieces { + arrayParam.ParameterPieces[i].Location.StackOffset = initialOffset + (arrayParam.ParameterPieces[i].TotalSize * int64(i)) + correctTypeSpecificLocations([]ditypes.Parameter{arrayParam.ParameterPieces[i]}, fieldLocations) + } +} diff --git a/pkg/dynamicinstrumentation/diconfig/config_manager.go b/pkg/dynamicinstrumentation/diconfig/config_manager.go new file mode 100644 index 0000000000000..a49287e0a7566 --- /dev/null +++ b/pkg/dynamicinstrumentation/diconfig/config_manager.go @@ -0,0 +1,295 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux_bpf + +// Package diconfig provides utlity that allows dynamic instrumentation to receive and +// manage probe configurations from users +package diconfig + +import ( + "encoding/json" + "fmt" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/codegen" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diagnostics" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ebpf" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/eventparser" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/proctracker" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ratelimiter" + "github.com/cilium/ebpf/ringbuf" + "github.com/google/uuid" +) + +type rcConfig struct { + ID string + Version int + ProbeType string `json:"type"` + Language string + Where struct { + TypeName string `json:"typeName"` + MethodName string `json:"methodName"` + SourceFile string + Lines []string + } + Tags []string + Template string + CaptureSnapshot bool + EvaluatedAt string + Capture struct { + MaxReferenceDepth int `json:"maxReferenceDepth"` + MaxFieldCount int `json:"maxFieldCount"` + } +} + +type configUpdateCallback func(*ditypes.ProcessInfo, *ditypes.Probe) + +// ConfigManager is a facility to track probe configurations for +// instrumenting tracked processes +type ConfigManager interface { + GetProcInfos() ditypes.DIProcs + Stop() +} + +// RCConfigManager is the configuration manager which utilizes remote-config +type RCConfigManager struct { + procTracker *proctracker.ProcessTracker + + diProcs ditypes.DIProcs + callback configUpdateCallback +} + +// NewRCConfigManager creates a new configuration manager which utilizes remote-config +func NewRCConfigManager() (*RCConfigManager, error) { + log.Info("Creating new RC config manager") + cm := &RCConfigManager{ + callback: applyConfigUpdate, + } + + cm.procTracker = proctracker.NewProcessTracker(cm.updateProcesses) + err := cm.procTracker.Start() + if err != nil { + return nil, fmt.Errorf("could not start process tracker: %w", err) + } + cm.diProcs = ditypes.NewDIProcs() + return cm, nil +} + +// GetProcInfos returns the state of the RCConfigManager +func (cm *RCConfigManager) GetProcInfos() ditypes.DIProcs { + return cm.diProcs +} + +// Stop closes the config and proc trackers used by the RCConfigManager +func (cm *RCConfigManager) Stop() { + cm.procTracker.Stop() + for _, procInfo := range cm.GetProcInfos() { + procInfo.CloseAllUprobeLinks() + } +} + +// updateProcesses is the callback interface that ConfigManager uses to consume the map of `ProcessInfo`s +// It is called whenever there's an update to the state of known processes of services on the machine. +// +// It compares the previously known state of services on the machine and creates a hook on the remote-config +// callback for configurations on new ones, and deletes the hook on old ones. 
+func (cm *RCConfigManager) updateProcesses(runningProcs ditypes.DIProcs) { + // Remove processes that are no longer running from state and close their uprobe links + for pid, procInfo := range cm.diProcs { + _, ok := runningProcs[pid] + if !ok { + procInfo.CloseAllUprobeLinks() + delete(cm.diProcs, pid) + } + } + + for pid, runningProcInfo := range runningProcs { + _, ok := cm.diProcs[pid] + if !ok { + cm.diProcs[pid] = runningProcInfo + err := cm.installConfigProbe(runningProcInfo) + if err != nil { + log.Infof("could not install config probe for service %s (pid %d): %s", runningProcInfo.ServiceName, runningProcInfo.PID, err) + } + } + } +} + +func (cm *RCConfigManager) installConfigProbe(procInfo *ditypes.ProcessInfo) error { + var err error + configProbe := newConfigProbe() + + svcConfigProbe := *configProbe + svcConfigProbe.ServiceName = procInfo.ServiceName + procInfo.ProbesByID[configProbe.ID] = &svcConfigProbe + + err = AnalyzeBinary(procInfo) + if err != nil { + return fmt.Errorf("could not analyze binary for config probe: %w", err) + } + + err = codegen.GenerateBPFParamsCode(procInfo, configProbe) + if err != nil { + return fmt.Errorf("could not generate bpf code for config probe: %w", err) + } + + err = ebpf.CompileBPFProgram(procInfo, configProbe) + if err != nil { + return fmt.Errorf("could not compile bpf code for config probe: %w", err) + } + + err = ebpf.AttachBPFUprobe(procInfo, configProbe) + if err != nil { + return fmt.Errorf("could not attach bpf code for config probe: %w", err) + } + + m, err := procInfo.SetupConfigUprobe() + if err != nil { + return fmt.Errorf("could not setup config probe for service %s: %w", procInfo.ServiceName, err) + } + + r, err := ringbuf.NewReader(m) + if err != nil { + return fmt.Errorf("could not read from config probe %s", procInfo.ServiceName) + } + + go cm.readConfigs(r, procInfo) + + return nil +} + +func (cm *RCConfigManager) readConfigs(r *ringbuf.Reader, procInfo *ditypes.ProcessInfo) { + log.Tracef("Waiting for configs for service: %s", procInfo.ServiceName) + for { + record, err := r.Read() + if err != nil { + log.Errorf("error reading raw configuration from bpf: %v", err) + continue + } + + configEventParams, err := eventparser.ParseParams(record.RawSample) + if err != nil { + log.Errorf("error parsing configuration for PID %d: %v", procInfo.PID, err) + continue + } + if len(configEventParams) != 3 { + log.Errorf("error parsing configuration for PID %d: not enough arguments", procInfo.PID) + continue + } + + runtimeID, err := uuid.ParseBytes([]byte(configEventParams[0].ValueStr)) + if err != nil { + log.Errorf("Runtime ID \"%s\" is not a UUID: %v)", runtimeID, err) + continue + } + + configPath, err := ditypes.ParseConfigPath(string(configEventParams[1].ValueStr)) + if err != nil { + log.Errorf("couldn't parse config path: %v", err) + continue + } + + // An empty config means that this probe has been removed for this process + if configEventParams[2].ValueStr == "" { + cm.diProcs.DeleteProbe(procInfo.PID, configPath.ProbeUUID.String()) + continue + } + + conf := rcConfig{} + err = json.Unmarshal([]byte(configEventParams[2].ValueStr), &conf) + if err != nil { + diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, configPath.ProbeUUID.String(), "ATTACH_ERROR", err.Error()) + log.Errorf("could not unmarshal configuration, cannot apply: %v (Probe-ID: %s)\n", err, configPath.ProbeUUID) + continue + } + + if conf.Capture.MaxReferenceDepth == 0 { + conf.Capture.MaxReferenceDepth = int(ditypes.MaxReferenceDepth) + } 
+ if conf.Capture.MaxFieldCount == 0 { + conf.Capture.MaxFieldCount = int(ditypes.MaxFieldCount) + } + opts := &ditypes.InstrumentationOptions{ + CaptureParameters: ditypes.CaptureParameters, + ArgumentsMaxSize: ditypes.ArgumentsMaxSize, + StringMaxSize: ditypes.StringMaxSize, + MaxReferenceDepth: conf.Capture.MaxReferenceDepth, + MaxFieldCount: conf.Capture.MaxFieldCount, + } + + probe, probeExists := procInfo.ProbesByID[configPath.ProbeUUID.String()] + if !probeExists { + cm.diProcs.SetProbe(procInfo.PID, procInfo.ServiceName, conf.Where.TypeName, conf.Where.MethodName, configPath.ProbeUUID, runtimeID, opts) + diagnostics.Diagnostics.SetStatus(procInfo.ServiceName, runtimeID.String(), configPath.ProbeUUID.String(), ditypes.StatusReceived) + probe = procInfo.ProbesByID[configPath.ProbeUUID.String()] + } + + // Check hash to see if the configuration changed + if configPath.Hash != probe.InstrumentationInfo.ConfigurationHash { + probe.InstrumentationInfo.ConfigurationHash = configPath.Hash + applyConfigUpdate(procInfo, probe) + } + } +} + +func applyConfigUpdate(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) { + log.Tracef("Applying config update: %v", probe) + err := AnalyzeBinary(procInfo) + if err != nil { + log.Errorf("couldn't inspect binary: %v\n", err) + return + } + +generateCompileAttach: + err = codegen.GenerateBPFParamsCode(procInfo, probe) + if err != nil { + log.Info("Couldn't generate BPF programs", err) + return + } + + err = ebpf.CompileBPFProgram(procInfo, probe) + if err != nil { + log.Info("Couldn't compile BPF object", err) + if !probe.InstrumentationInfo.AttemptedRebuild { + log.Info("Removing parameters and attempting to rebuild BPF object", err) + probe.InstrumentationInfo.AttemptedRebuild = true + probe.InstrumentationInfo.InstrumentationOptions.CaptureParameters = false + goto generateCompileAttach + } + return + } + + err = ebpf.AttachBPFUprobe(procInfo, probe) + if err != nil { + log.Info("Couldn't load and attach bpf programs", err) + if !probe.InstrumentationInfo.AttemptedRebuild { + log.Info("Removing parameters and attempting to rebuild BPF object", err) + probe.InstrumentationInfo.AttemptedRebuild = true + probe.InstrumentationInfo.InstrumentationOptions.CaptureParameters = false + goto generateCompileAttach + } + return + } +} + +func newConfigProbe() *ditypes.Probe { + return &ditypes.Probe{ + ID: ditypes.ConfigBPFProbeID, + FuncName: "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer.passProbeConfiguration", + InstrumentationInfo: &ditypes.InstrumentationInfo{ + InstrumentationOptions: &ditypes.InstrumentationOptions{ + ArgumentsMaxSize: 100000, + StringMaxSize: 30000, + MaxFieldCount: int(ditypes.MaxFieldCount), + MaxReferenceDepth: 8, + CaptureParameters: true, + }, + }, + RateLimiter: ratelimiter.NewSingleEventRateLimiter(0), + } +} diff --git a/pkg/dynamicinstrumentation/diconfig/dwarf.go b/pkg/dynamicinstrumentation/diconfig/dwarf.go new file mode 100644 index 0000000000000..96e6d0e385660 --- /dev/null +++ b/pkg/dynamicinstrumentation/diconfig/dwarf.go @@ -0,0 +1,643 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
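
The generate/compile/attach flow in `applyConfigUpdate` above degrades gracefully: if the BPF object fails to compile or attach with parameter capture enabled, capture is switched off and the pipeline is rerun exactly once. Below is a hedged, standalone sketch of that control flow; `buildAndAttach` is a placeholder invented for the example, not the real codegen/ebpf calls.

```go
package main

import (
	"errors"
	"fmt"
)

// buildAndAttach is a stand-in for the real generate -> compile -> attach
// pipeline; here it fails whenever parameter capture is requested, purely
// to exercise the retry path.
func buildAndAttach(captureParams bool) error {
	if captureParams {
		return errors.New("program too large")
	}
	return nil
}

// applyWithFallback mirrors the single-retry structure: on failure, disable
// parameter capture, remember that a rebuild was attempted, and try once more.
func applyWithFallback() error {
	captureParams := true
	attemptedRebuild := false
	for {
		err := buildAndAttach(captureParams)
		if err == nil {
			return nil
		}
		if attemptedRebuild {
			return err
		}
		fmt.Println("build failed, retrying without parameter capture:", err)
		attemptedRebuild = true
		captureParams = false
	}
}

func main() {
	if err := applyWithFallback(); err != nil {
		fmt.Println("giving up:", err)
		return
	}
	fmt.Println("probe attached (parameters not captured)")
}
```
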
+ +//go:build linux_bpf + +package diconfig + +import ( + "cmp" + "debug/dwarf" + "debug/elf" + "fmt" + "io" + "reflect" + "slices" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/go-delve/delve/pkg/dwarf/godwarf" +) + +func getTypeMap(dwarfData *dwarf.Data, targetFunctions map[string]bool) (*ditypes.TypeMap, error) { + return loadFunctionDefinitions(dwarfData, targetFunctions) +} + +var dwarfMap = make(map[string]*dwarf.Data) + +type seenTypeCounter struct { + parameter *ditypes.Parameter + count uint8 +} + +var seenTypes = make(map[string]*seenTypeCounter) + +func loadFunctionDefinitions(dwarfData *dwarf.Data, targetFunctions map[string]bool) (*ditypes.TypeMap, error) { + entryReader := dwarfData.Reader() + typeReader := dwarfData.Reader() + readingAFunction := false + var funcName string + + var result = ditypes.TypeMap{ + Functions: make(map[string][]ditypes.Parameter), + InlinedFunctions: make(map[uint64][]*dwarf.Entry), + } + + var ( + name string + typeFields *ditypes.Parameter + ) + +entryLoop: + for { + entry, err := entryReader.Next() + if err == io.EOF || entry == nil { + break + } + + if entryIsEmpty(entry) { + readingAFunction = false + continue entryLoop + } + + if entry.Tag == dwarf.TagCompileUnit { + + name, ok := entry.Val(dwarf.AttrName).(string) + if !ok { + continue entryLoop + } + ranges, err := dwarfData.Ranges(entry) + if err != nil { + log.Infof("couldnt retrieve ranges for compile unit %s: %s", name, err) + continue entryLoop + } + + for i := range ranges { + result.DeclaredFiles = append(result.DeclaredFiles, &ditypes.LowPCEntry{ + LowPC: ranges[i][0], + Entry: entry, + }) + } + } + + if entry.Tag == dwarf.TagInlinedSubroutine { + // This is a inlined function + for i := range entry.Field { + // Find it's high program counter (where it exits in the parent routine) + if entry.Field[i].Attr == dwarf.AttrHighpc { + + // The field for HighPC can be a constant or address, which are int64 and uint64 respectively + if entry.Field[i].Class == dwarf.ClassConstant { + result.InlinedFunctions[uint64(entry.Field[i].Val.(int64))] = + append([]*dwarf.Entry{entry}, result.InlinedFunctions[uint64(entry.Field[i].Val.(int64))]...) + } else if entry.Field[i].Class == dwarf.ClassAddress { + result.InlinedFunctions[entry.Field[i].Val.(uint64)] = + append([]*dwarf.Entry{entry}, result.InlinedFunctions[entry.Field[i].Val.(uint64)]...) 
+ } + } + } + continue entryLoop + } + + if entry.Tag == dwarf.TagSubprogram { + + for _, field := range entry.Field { + if field.Attr == dwarf.AttrLowpc { + lowpc := field.Val.(uint64) + result.FunctionsByPC = append(result.FunctionsByPC, &ditypes.LowPCEntry{LowPC: lowpc, Entry: entry}) + } + } + + for _, field := range entry.Field { + if field.Attr == dwarf.AttrName { + funcName = field.Val.(string) + if !targetFunctions[funcName] { + continue entryLoop + } + result.Functions[funcName] = make([]ditypes.Parameter, 0) + readingAFunction = true + continue entryLoop + } + } + } + + if !readingAFunction { + continue + } + + if entry.Tag != dwarf.TagFormalParameter { + readingAFunction = false + continue entryLoop + } + + // This branch should only be reached if we're currently reading ditypes.Parameters of a function + // Meaning: This is a formal ditypes.Parameter entry, and readingAFunction = true + + // Go through fields of the entry collecting type, name, size information + for i := range entry.Field { + + // ditypes.Parameter name + if entry.Field[i].Attr == dwarf.AttrName { + name = entry.Field[i].Val.(string) + } + + // Collect information about the type of this ditypes.Parameter + if entry.Field[i].Attr == dwarf.AttrType { + + typeReader.Seek(entry.Field[i].Val.(dwarf.Offset)) + typeEntry, err := typeReader.Next() + if err != nil { + return nil, err + } + + typeFields, err = expandTypeData(typeEntry.Offset, dwarfData) + if err != nil { + return nil, fmt.Errorf("error while parsing debug information: %w", err) + } + + } + } + + if typeFields != nil { + // We've collected information about this ditypes.Parameter, append it to the slice of ditypes.Parameters for this function + typeFields.Name = name + result.Functions[funcName] = append(result.Functions[funcName], *typeFields) + } + seenTypes = make(map[string]*seenTypeCounter) // reset seen types map for next parameter + } + + // Sort program counter slice for lookup when resolving pcs->functions + slices.SortFunc(result.FunctionsByPC, func(a, b *ditypes.LowPCEntry) int { + return cmp.Compare(b.LowPC, a.LowPC) + }) + slices.SortFunc(result.DeclaredFiles, func(a, b *ditypes.LowPCEntry) int { + return cmp.Compare(b.LowPC, a.LowPC) + }) + + return &result, nil +} + +func loadDWARF(binaryPath string) (*dwarf.Data, error) { + if dwarfData, ok := dwarfMap[binaryPath]; ok { + return dwarfData, nil + } + elfFile, err := elf.Open(binaryPath) + if err != nil { + return nil, fmt.Errorf("couldn't open elf binary: %w", err) + } + + dwarfData, err := elfFile.DWARF() + if err != nil { + return nil, fmt.Errorf("couldn't retrieve debug info from elf: %w", err) + } + dwarfMap[binaryPath] = dwarfData + return dwarfData, nil +} + +func expandTypeData(offset dwarf.Offset, dwarfData *dwarf.Data) (*ditypes.Parameter, error) { + typeReader := dwarfData.Reader() + + typeReader.Seek(offset) + typeEntry, err := typeReader.Next() + if err != nil { + return nil, fmt.Errorf("could not get type entry: %w", err) + } + + if !entryTypeIsSupported(typeEntry) { + return resolveUnsupportedEntry(typeEntry), nil + } + + if typeEntry.Tag == dwarf.TagTypedef { + typeEntry, err = resolveTypedefToRealType(typeEntry, typeReader) + if err != nil { + return nil, err + } + } + + typeName, typeSize, typeKind := getTypeEntryBasicInfo(typeEntry) + typeHeader := ditypes.Parameter{ + Type: typeName, + TotalSize: typeSize, + Kind: typeKind, + } + + v, typeParsedAlready := seenTypes[typeHeader.Type] + if typeParsedAlready { + v.count++ + if v.count >= ditypes.MaxReferenceDepth { + return 
v.parameter, nil + } + } else { + seenTypes[typeHeader.Type] = &seenTypeCounter{ + parameter: &typeHeader, + count: 1, + } + } + + if typeKind == uint(reflect.Slice) { + sliceElements, err := getSliceField(typeEntry.Offset, dwarfData) + if err != nil { + return nil, fmt.Errorf("could not collect fields of slice type: %w", err) + } + typeHeader = sliceElements[0] + } else if typeEntry.Tag == dwarf.TagStructType && typeName != "string" { + structFields, err := getStructFields(typeEntry.Offset, dwarfData) + if err != nil { + return nil, fmt.Errorf("could not collect fields of struct type of ditypes.Parameter: %w", err) + } + typeHeader.ParameterPieces = structFields + } else if typeEntry.Tag == dwarf.TagArrayType { + arrayElements, err := getIndividualArrayElements(typeEntry.Offset, dwarfData) + if err != nil { + return nil, fmt.Errorf("could not get length of array: %w", err) + } + typeHeader.ParameterPieces = arrayElements + } else if typeEntry.Tag == dwarf.TagPointerType { + pointerElements, err := getPointerLayers(typeEntry.Offset, dwarfData) + if err != nil { + return nil, fmt.Errorf("could not find pointer type: %w", err) + } + typeHeader.ParameterPieces = pointerElements + } + + return &typeHeader, nil +} + +// getSliceField returns the representation of a slice as a []ditypes.Parameter. The returned +// slice will have only one element. +// +// Slices are represented internally in go as a struct with 3 fields. The pointer to the +// the underlying array, the array length, and the array capacity. +func getSliceField(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Parameter, error) { + typeReader := dwarfData.Reader() + + typeReader.Seek(offset) + typeEntry, err := typeReader.Next() + if err != nil { + return nil, fmt.Errorf("could not get slice type entry: %w", err) + } + + elementTypeName, elementTypeSize, elementTypeKind := getTypeEntryBasicInfo(typeEntry) + sliceParameter := ditypes.Parameter{ + Type: elementTypeName, + TotalSize: elementTypeSize, + Kind: elementTypeKind, + } + + arrayEntry, err := typeReader.Next() + if err != nil { + return nil, fmt.Errorf("could not get slice type entry: %w", err) + } + + for i := range arrayEntry.Field { + if arrayEntry.Field[i].Attr == dwarf.AttrType { + typeReader.Seek(arrayEntry.Field[i].Val.(dwarf.Offset)) + typeEntry, err := typeReader.Next() + if err != nil { + return nil, err + } + underlyingType, err := expandTypeData(typeEntry.Offset, dwarfData) + if err != nil { + return nil, err + } + sliceParameter.ParameterPieces = append(sliceParameter.ParameterPieces, underlyingType.ParameterPieces[0]) + } + } + return []ditypes.Parameter{sliceParameter}, nil +} + +func getIndividualArrayElements(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Parameter, error) { + savedArrayEntryOffset := offset + typeReader := dwarfData.Reader() + + // Go to the entry of the array type to get the underlying type information + typeReader.Seek(offset) + typeEntry, err := typeReader.Next() + if err != nil { + return nil, fmt.Errorf("could not get array type entry: %w", err) + } + + var ( + elementFields *ditypes.Parameter + elementTypeName string + elementTypeSize int64 + elementTypeKind uint + ) + underlyingType, err := followType(typeEntry, dwarfData.Reader()) + if err != nil { + return nil, fmt.Errorf("could not get underlying array type's type entry: %w", err) + } + if !entryTypeIsSupported(underlyingType) { + elementFields = resolveUnsupportedEntry(underlyingType) + elementTypeName, elementTypeSize, elementTypeKind = 
getTypeEntryBasicInfo(underlyingType) + } else { + arrayElementTypeEntry, err := resolveTypedefToRealType(underlyingType, typeReader) + if err != nil { + return nil, err + } + + elementFields, err = expandTypeData(arrayElementTypeEntry.Offset, dwarfData) + if err != nil { + return nil, err + } + + elementTypeName, elementTypeSize, elementTypeKind = getTypeEntryBasicInfo(arrayElementTypeEntry) + } + + // Return back to entry of array so we can go to the subrange entry after the type, which gives + // us the length of the array + typeReader.Seek(savedArrayEntryOffset) + _, err = typeReader.Next() + if err != nil { + return nil, fmt.Errorf("could not find array entry: %w", err) + } + subrangeEntry, err := typeReader.Next() + if err != nil { + return nil, fmt.Errorf("could not get length of array: %w", err) + } + + var arrayLength int64 + for h := range subrangeEntry.Field { + if subrangeEntry.Field[h].Attr == dwarf.AttrCount { + arrayLength = subrangeEntry.Field[h].Val.(int64) + } + } + + arrayElements := []ditypes.Parameter{} + for h := 0; h < int(arrayLength); h++ { + newParam := ditypes.Parameter{} + copyTree(&newParam.ParameterPieces, &elementFields.ParameterPieces) + newParam.Name = fmt.Sprintf("[%d]%s[%d]", arrayLength, elementTypeName, h) + newParam.Type = elementTypeName + newParam.Kind = elementTypeKind + newParam.TotalSize = elementTypeSize + arrayElements = append(arrayElements, newParam) + } + + return arrayElements, nil +} + +func getStructFields(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Parameter, error) { + inOrderReader := dwarfData.Reader() + typeReader := dwarfData.Reader() + + structFields := []ditypes.Parameter{} + fieldEntry := &dwarf.Entry{} + + // Start at the entry of the definition of the struct + inOrderReader.Seek(offset) + _, err := inOrderReader.Next() + if err != nil { + return structFields, err + } + + // From the struct entry in DWARF, traverse through subsequent DWARF entries + // which are fields of the struct + for { + fieldEntry, err = inOrderReader.Next() + if err != nil { + return []ditypes.Parameter{}, err + } + + if entryIsEmpty(fieldEntry) || fieldEntry.Tag != dwarf.TagMember { + break + } + + newStructField := ditypes.Parameter{} + + for i := range fieldEntry.Field { + + // Struct Field Name + if fieldEntry.Field[i].Attr == dwarf.AttrName { + newStructField.Name = fieldEntry.Field[i].Val.(string) + } + + // Struct Field Type + if fieldEntry.Field[i].Attr == dwarf.AttrType { + typeReader.Seek(fieldEntry.Field[i].Val.(dwarf.Offset)) + typeEntry, err := typeReader.Next() + if err != nil { + return []ditypes.Parameter{}, err + } + + if !entryTypeIsSupported(typeEntry) { + unsupportedType := resolveUnsupportedEntry(typeEntry) + structFields = append(structFields, *unsupportedType) + continue + } + + if typeEntry.Tag == dwarf.TagTypedef { + typeEntry, err = resolveTypedefToRealType(typeEntry, typeReader) + if err != nil { + return []ditypes.Parameter{}, err + } + } + + newStructField.Type, newStructField.TotalSize, newStructField.Kind = getTypeEntryBasicInfo(typeEntry) + if typeEntry.Tag != dwarf.TagBaseType { + field, err := expandTypeData(typeEntry.Offset, dwarfData) + if err != nil { + return []ditypes.Parameter{}, err + } + field.Name = newStructField.Name + structFields = append(structFields, *field) + } else { + structFields = append(structFields, newStructField) + } + } + } + } + return structFields, nil +} + +func getPointerLayers(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Parameter, error) { + typeReader := 
dwarfData.Reader() + typeReader.Seek(offset) + pointerEntry, err := typeReader.Next() + if err != nil { + return nil, err + } + var underlyingType *ditypes.Parameter + for i := range pointerEntry.Field { + + if pointerEntry.Field[i].Attr == dwarf.AttrType { + typeReader.Seek(pointerEntry.Field[i].Val.(dwarf.Offset)) + typeEntry, err := typeReader.Next() + if err != nil { + return nil, err + } + + underlyingType, err = expandTypeData(typeEntry.Offset, dwarfData) + if err != nil { + return nil, err + } + } + } + if underlyingType == nil { + return []ditypes.Parameter{}, nil + } + return []ditypes.Parameter{*underlyingType}, nil +} + +// Can use `Children` field, but there's also always a NULL/empty entry at the end of entry trees. +func entryIsEmpty(e *dwarf.Entry) bool { + return !e.Children && + len(e.Field) == 0 && + e.Offset == 0 && + e.Tag == dwarf.Tag(0) +} + +func getTypeEntryBasicInfo(typeEntry *dwarf.Entry) (typeName string, typeSize int64, typeKind uint) { + if typeEntry.Tag == dwarf.TagPointerType { + typeSize = 8 // On 64 bit, all pointers are 8 bytes + } + for i := range typeEntry.Field { + if typeEntry.Field[i].Attr == dwarf.AttrName { + typeName = typeEntry.Field[i].Val.(string) + } + if typeEntry.Field[i].Attr == dwarf.AttrByteSize { + typeSize = typeEntry.Field[i].Val.(int64) + } + if typeEntry.Field[i].Attr == godwarf.AttrGoKind { + typeKind = uint(typeEntry.Field[i].Val.(int64)) + if typeKind == 0 { + // Temporary fix for bug: https://github.com/golang/go/issues/64231 + switch typeEntry.Tag { + case dwarf.TagStructType: + typeKind = uint(reflect.Struct) + case dwarf.TagArrayType: + typeKind = uint(reflect.Array) + case dwarf.TagPointerType: + typeKind = uint(reflect.Pointer) + default: + log.Info("Unexpected AttrGoKind == 0 for", typeEntry.Tag) + } + } + } + } + return +} + +func followType(outerType *dwarf.Entry, reader *dwarf.Reader) (*dwarf.Entry, error) { + for i := range outerType.Field { + if outerType.Field[i].Attr == dwarf.AttrType { + reader.Seek(outerType.Field[i].Val.(dwarf.Offset)) + nextType, err := reader.Next() + if err != nil { + return nil, fmt.Errorf("error while retrieving underlying type: %w", err) + } + return nextType, nil + } + } + return outerType, nil +} + +// resolveTypedefToRealType is used to get the underlying type of fields/variables/parameters when +// go packages the type underneath a typdef DWARF entry. The typedef DWARF entry has a 'type' entry +// which points to the actual type, which is what this function 'resolves'. +// Typedef's are used in for structs, pointers, maps, and likely other types. 
+func resolveTypedefToRealType(outerType *dwarf.Entry, reader *dwarf.Reader) (*dwarf.Entry, error) {
+
+	if outerType.Tag == dwarf.TagTypedef {
+		followedType, err := followType(outerType, reader)
+		if err != nil {
+			return nil, err
+		}
+
+		if followedType.Tag == dwarf.TagTypedef {
+			return resolveTypedefToRealType(followedType, reader)
+		}
+		return followedType, nil
+	}
+
+	return outerType, nil
+}
+
+func correctStructSizes(params []ditypes.Parameter) {
+	for i := range params {
+		correctStructSize(&params[i])
+	}
+}
+
+// correctStructSize sets the size of structs to the number of fields in the struct
+func correctStructSize(param *ditypes.Parameter) {
+	if len(param.ParameterPieces) == 0 {
+		return
+	}
+	if param.Kind == uint(reflect.Struct) || param.Kind == uint(reflect.Array) {
+		param.TotalSize = int64(len(param.ParameterPieces))
+	}
+	for i := range param.ParameterPieces {
+		correctStructSize(&param.ParameterPieces[i])
+	}
+}
+
+func copyTree(dst, src *[]ditypes.Parameter) {
+	if dst == nil || src == nil || len(*src) == 0 {
+		return
+	}
+	*dst = make([]ditypes.Parameter, len(*src))
+	copy(*dst, *src)
+	for i := range *src {
+		copyTree(&((*dst)[i].ParameterPieces), &((*src)[i].ParameterPieces))
+	}
+}
+
+func kindIsSupported(k reflect.Kind) bool {
+	if k == reflect.Map ||
+		k == reflect.UnsafePointer ||
+		k == reflect.Chan {
+		return false
+	}
+	return true
+}
+
+func typeIsSupported(t string) bool {
+	return t != "unsafe.Pointer"
+}
+
+func entryTypeIsSupported(e *dwarf.Entry) bool {
+	for f := range e.Field {
+
+		if e.Field[f].Attr == godwarf.AttrGoKind {
+			kindOfTypeEntry := reflect.Kind(e.Field[f].Val.(int64))
+			if !kindIsSupported(kindOfTypeEntry) {
+				return false
+			}
+		}
+
+		if e.Field[f].Attr == dwarf.AttrName {
+			if !typeIsSupported(e.Field[f].Val.(string)) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func resolveUnsupportedEntry(e *dwarf.Entry) *ditypes.Parameter {
+	var (
+		kind uint
+		name string
+	)
+	for f := range e.Field {
+		if e.Field[f].Attr == godwarf.AttrGoKind {
+			kind = uint(e.Field[f].Val.(int64))
+		}
+		if e.Field[f].Attr == dwarf.AttrName {
+			name = e.Field[f].Val.(string)
+		}
+	}
+	if name == "unsafe.Pointer" {
+		// The DWARF entry for unsafe.Pointer doesn't have a `kind` field
+		kind = uint(reflect.UnsafePointer)
+	}
+	return &ditypes.Parameter{
+		Type:             fmt.Sprintf("unsupported-%s", reflect.Kind(kind).String()),
+		Kind:             kind,
+		NotCaptureReason: ditypes.Unsupported,
+	}
+}
diff --git a/pkg/dynamicinstrumentation/diconfig/file_config_manager.go b/pkg/dynamicinstrumentation/diconfig/file_config_manager.go
new file mode 100644
index 0000000000000..3f495ee97c4e4
--- /dev/null
+++ b/pkg/dynamicinstrumentation/diconfig/file_config_manager.go
@@ -0,0 +1,230 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
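
`copyTree` above performs a recursive deep copy of a parameter tree so that, for example, each synthesized array element gets its own `ParameterPieces` rather than aliasing a shared slice. The sketch below reproduces the same recursion on a stand-in type (`Node` is invented for the example) to show that mutating the copy leaves the original untouched.

```go
package main

import "fmt"

// Node is a hypothetical stand-in for ditypes.Parameter in this sketch.
type Node struct {
	Name   string
	Pieces []Node
}

// deepCopy mirrors copyTree: copy the current level, then recurse into children
// so no slice backing array is shared between src and dst.
func deepCopy(dst, src *[]Node) {
	if dst == nil || src == nil || len(*src) == 0 {
		return
	}
	*dst = make([]Node, len(*src))
	copy(*dst, *src)
	for i := range *src {
		deepCopy(&(*dst)[i].Pieces, &(*src)[i].Pieces)
	}
}

func main() {
	src := []Node{{Name: "outer", Pieces: []Node{{Name: "inner"}}}}
	var dst []Node
	deepCopy(&dst, &src)

	dst[0].Pieces[0].Name = "changed"
	// The original tree is unaffected because the nested slices were copied too.
	fmt.Println(src[0].Pieces[0].Name, "/", dst[0].Pieces[0].Name) // inner / changed
}
```
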
+ +//go:build linux_bpf + +package diconfig + +import ( + "encoding/json" + "fmt" + "reflect" + "sync" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/proctracker" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/util" +) + +// FileWatchingConfigManager is used to track updates to a specified file +// which contains probe configurations +type FileWatchingConfigManager struct { + sync.Mutex + configTracker *configTracker + procTracker *proctracker.ProcessTracker + + callback configUpdateCallback + configs configsByService + state ditypes.DIProcs +} + +type fileConfigCallback func(configsByService) + +type configsByService = map[ditypes.ServiceName]map[ditypes.ProbeID]rcConfig + +// NewFileConfigManager creates a FileWatchingConfigManager set up to track +// the specified file. +func NewFileConfigManager(configFile string) (*FileWatchingConfigManager, error) { + cm := &FileWatchingConfigManager{ + callback: applyConfigUpdate, + } + + cm.procTracker = proctracker.NewProcessTracker(cm.updateProcessInfo) + err := cm.procTracker.Start() + if err != nil { + return nil, err + } + + cm.configTracker = newFileWatchingConfigTracker(configFile, cm.updateServiceConfigs) + err = cm.configTracker.Start() + if err != nil { + return nil, err + } + return cm, nil +} + +// GetProcInfos returns the state of the FileWatchingConfigManager +func (cm *FileWatchingConfigManager) GetProcInfos() ditypes.DIProcs { + return cm.state +} + +// Stop closes the config and proc trackers used by the FileWatchingConfigManager +func (cm *FileWatchingConfigManager) Stop() { + cm.configTracker.Stop() + cm.procTracker.Stop() +} + +func newFileWatchingConfigTracker(configFile string, onConfigUpdate fileConfigCallback) *configTracker { + ct := configTracker{ + ConfigPath: configFile, + configCallback: onConfigUpdate, + stopChannel: make(chan bool), + } + + return &ct +} + +// correlate this new configuration with a running service, +// and operate on the new global state of services/configs +// via cm.callback +func (cm *FileWatchingConfigManager) updateServiceConfigs(configs configsByService) { + log.Info("Updating config from file:", configs) + cm.configs = configs + err := cm.update() + if err != nil { + log.Info(err) + } +} + +func (cm *FileWatchingConfigManager) updateProcessInfo(procs ditypes.DIProcs) { + cm.Lock() + defer cm.Unlock() + log.Info("Updating procs", procs) + cm.configTracker.UpdateProcesses(procs) + err := cm.update() + if err != nil { + log.Info(err) + } +} + +type configTracker struct { + Processes map[ditypes.PID]*ditypes.ProcessInfo + ConfigPath string + configCallback fileConfigCallback + stopChannel chan bool +} + +func (ct *configTracker) Start() error { + fw := util.NewFileWatcher(ct.ConfigPath) + updateChan, err := fw.Watch() + if err != nil { + return fmt.Errorf("failed to watch config file %s: %s", ct.ConfigPath, err) + } + + go func(updateChan <-chan []byte) { + configUpdateLoop: + for { + select { + case rawConfigBytes := <-updateChan: + conf := map[string]map[string]rcConfig{} + err = json.Unmarshal(rawConfigBytes, &conf) + if err != nil { + log.Infof("invalid config read from %s: %s", ct.ConfigPath, err) + continue + } + ct.configCallback(conf) + case <-ct.stopChannel: + break configUpdateLoop + } + } + }(updateChan) + return nil +} + +func (ct *configTracker) Stop() { + ct.stopChannel <- true +} + +// UpdateProcesses is the callback interface 
that ConfigTracker uses to consume the map of ProcessInfo's +// such that it's used whenever there's an update to the state of known service processes on the machine. +// It simply overwrites the previous state of known service processes with the new one +func (ct *configTracker) UpdateProcesses(procs ditypes.DIProcs) { + current := procs + old := ct.Processes + if !reflect.DeepEqual(current, old) { + ct.Processes = current + } +} + +func (cm *FileWatchingConfigManager) update() error { + var updatedState = ditypes.NewDIProcs() + for serviceName, configsByID := range cm.configs { + for pid, proc := range cm.configTracker.Processes { + // If a config exists relevant to this proc + if proc.ServiceName == serviceName { + procCopy := *proc + updatedState[pid] = &procCopy + updatedState[pid].ProbesByID = convert(serviceName, configsByID) + } + } + } + + if !reflect.DeepEqual(cm.state, updatedState) { + err := inspectGoBinaries(updatedState) + if err != nil { + return err + } + + for pid, procInfo := range cm.state { + // cleanup dead procs + if _, running := updatedState[pid]; !running { + procInfo.CloseAllUprobeLinks() + delete(cm.state, pid) + } + } + + for pid, procInfo := range updatedState { + if _, tracked := cm.state[pid]; !tracked { + for _, probe := range procInfo.GetProbes() { + // install all probes from new process + cm.callback(procInfo, probe) + } + } else { + for _, existingProbe := range cm.state[pid].GetProbes() { + updatedProbe := procInfo.GetProbe(existingProbe.ID) + if updatedProbe == nil { + // delete old probes + cm.state[pid].DeleteProbe(existingProbe.ID) + } + } + for _, updatedProbe := range procInfo.GetProbes() { + existingProbe := cm.state[pid].GetProbe(updatedProbe.ID) + if !reflect.DeepEqual(existingProbe, updatedProbe) { + // update existing probes that changed + cm.callback(procInfo, updatedProbe) + } + } + } + } + cm.state = updatedState + } + return nil +} + +func convert(service string, configsByID map[ditypes.ProbeID]rcConfig) map[ditypes.ProbeID]*ditypes.Probe { + probesByID := map[ditypes.ProbeID]*ditypes.Probe{} + for id, config := range configsByID { + probesByID[id] = config.toProbe(service) + } + return probesByID +} + +func (rc *rcConfig) toProbe(service string) *ditypes.Probe { + return &ditypes.Probe{ + ID: rc.ID, + ServiceName: service, + FuncName: fmt.Sprintf("%s.%s", rc.Where.TypeName, rc.Where.MethodName), + InstrumentationInfo: &ditypes.InstrumentationInfo{ + InstrumentationOptions: &ditypes.InstrumentationOptions{ + CaptureParameters: ditypes.CaptureParameters, + ArgumentsMaxSize: ditypes.ArgumentsMaxSize, + StringMaxSize: ditypes.StringMaxSize, + MaxReferenceDepth: rc.Capture.MaxReferenceDepth, + }, + }, + } +} diff --git a/pkg/dynamicinstrumentation/ditypes/analysis.go b/pkg/dynamicinstrumentation/ditypes/analysis.go new file mode 100644 index 0000000000000..0aa4a698e5782 --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/analysis.go @@ -0,0 +1,106 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
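
The file watched by `FileWatchingConfigManager` is decoded into `map[string]map[string]rcConfig`, i.e. service name -> probe ID -> probe configuration. The snippet below shows a plausible shape for that JSON and decodes it with a trimmed-down local struct; the field names follow the exported fields and JSON tags of `rcConfig` above, but the struct here is a stand-in and the concrete values (service name, probe ID, function names) are invented for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// probeConfig is a trimmed stand-in for the rcConfig struct; only the
// fields used in this example are declared.
type probeConfig struct {
	ID    string `json:"id"`
	Where struct {
		TypeName   string `json:"typeName"`
		MethodName string `json:"methodName"`
	} `json:"where"`
	Capture struct {
		MaxReferenceDepth int `json:"maxReferenceDepth"`
	} `json:"capture"`
}

func main() {
	// Hypothetical on-disk content: service name -> probe ID -> configuration.
	raw := []byte(`{
	  "my-go-service": {
	    "f0b49f3e-8364-448d-97e9-3e640c4a21e6": {
	      "id": "f0b49f3e-8364-448d-97e9-3e640c4a21e6",
	      "where": {"typeName": "main", "methodName": "handleRequest"},
	      "capture": {"maxReferenceDepth": 3}
	    }
	  }
	}`)

	configs := map[string]map[string]probeConfig{}
	if err := json.Unmarshal(raw, &configs); err != nil {
		fmt.Println("invalid config:", err)
		return
	}
	for svc, byID := range configs {
		for id, cfg := range byID {
			fmt.Printf("%s: probe %s -> %s.%s\n", svc, id, cfg.Where.TypeName, cfg.Where.MethodName)
		}
	}
}
```
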
+ +//go:build linux_bpf + +package ditypes + +import ( + "debug/dwarf" + "fmt" +) + +// TypeMap contains all the information about functions and their parameters including +// functions that have been inlined in the binary +type TypeMap struct { + // Functions maps fully-qualified function names to a slice of its parameters + Functions map[string][]Parameter + + // InlinedFunctions maps program counters to a slice of dwarf entries used + // when resolving stack traces that include inlined functions + InlinedFunctions map[uint64][]*dwarf.Entry + + // FunctionsByPC places DWARF subprogram (function) entries in order by + // its low program counter which is necessary for resolving stack traces + FunctionsByPC []*LowPCEntry + + // DeclaredFiles places DWARF compile unit entries in order by its + // low program counter which is necessary for resolving declared file + // for the sake of stack traces + DeclaredFiles []*LowPCEntry +} + +// Parameter represents a function parameter as read from DWARF info +type Parameter struct { + Name string + ID string + Type string + TotalSize int64 + Kind uint + Location Location + NotCaptureReason NotCaptureReason + ParameterPieces []Parameter +} + +func (p Parameter) String() string { + return fmt.Sprintf("%s %s", p.Name, p.Type) +} + +// NotCaptureReason is used to convey why a parameter was not captured +type NotCaptureReason uint8 + +const ( + Unsupported NotCaptureReason = iota + 1 // Unsupported means the data type of the parameter is unsupported + NoFieldLocation // NoFieldLocation means the parameter wasn't captured because location information is missing from analysis + FieldLimitReached // FieldLimitReached means the parameter wasn't captured because the data type has too many fields + CaptureDepthReached // CaptureDepthReached means the parameter wasn't captures because the data type has too many levels +) + +// SpecialKind is used for clarity in generated events that certain fields weren't read +type SpecialKind uint8 + +const ( + KindUnsupported = 255 - iota // KindUnsupported is for unsupported types + KindCutFieldLimit // KindCutFieldLimit is for fields that were cut because of field limit + KindCaptureDepthReached // KindCaptureDepthReached is for fields that were cut because of depth limit +) + +func (s SpecialKind) String() string { + switch s { + case KindUnsupported: + return "Unsupported" + case KindCutFieldLimit: + return "CutFieldLimit" + default: + return fmt.Sprintf("%d", s) + } +} + +// Location represents where a particular datatype is found on probe entry +type Location struct { + InReg bool + StackOffset int64 + Register int + NeedsDereference bool + PointerOffset uint64 +} + +func (l Location) String() string { + return fmt.Sprintf("Location{InReg: %t, StackOffset: %d, Register: %d}", l.InReg, l.StackOffset, l.Register) +} + +// LowPCEntry is a helper type used to sort DWARF entries by their low program counter +type LowPCEntry struct { + LowPC uint64 + Entry *dwarf.Entry +} + +// BPFProgram represents a bpf program that's created for a single probe +type BPFProgram struct { + ProgramText string + + // Used for bpf code generation + Probe *Probe +} diff --git a/pkg/dynamicinstrumentation/ditypes/config.go b/pkg/dynamicinstrumentation/ditypes/config.go new file mode 100644 index 0000000000000..06c0f826b33b7 --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/config.go @@ -0,0 +1,337 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +// Package ditypes contains various datatypes and otherwise shared components +// used by all the packages in dynamic instrumentation +package ditypes + +import ( + "debug/dwarf" + "fmt" + "io" + "strconv" + "strings" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ratelimiter" + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/link" + "github.com/google/uuid" +) + +const ConfigBPFProbeID = "config" // ConfigBPFProbeID is the ID used for the config bpf program + +var ( + CaptureParameters = true // CaptureParameters is the default value for if probes should capture parameter values + ArgumentsMaxSize = 10000 // ArgumentsMaxSize is the default size in bytes of the output buffer used for param values + StringMaxSize = 512 // StringMaxSize is the default size in bytes of a single string + MaxReferenceDepth uint8 = 4 // MaxReferenceDepth is the default depth that DI will traverse datatypes for capturing values + MaxFieldCount = 20 // MaxFieldCount is the default limit for how many fields DI will capture in a single data type + SliceMaxSize = 1800 // SliceMaxSize is the default limit in bytes of a slice + SliceMaxLength = 100 // SliceMaxLength is the default limit in number of elements of a slice +) + +// ProbeID is the unique identifier for probes +type ProbeID = string + +// ServiceName is the unique identifier for a service +type ServiceName = string + +// PID stands for process ID +type PID = uint32 + +// DIProcs is the map that dynamic instrumentation uses for tracking processes and their relevant instrumentation info +type DIProcs map[PID]*ProcessInfo + +// NewDIProcs creates a new DIProcs map +func NewDIProcs() DIProcs { + return DIProcs{} +} + +// GetProbes returns the relevant probes information for a specific process +func (procs DIProcs) GetProbes(pid PID) []*Probe { + procInfo, ok := procs[pid] + if !ok { + return nil + } + return procInfo.GetProbes() +} + +// GetProbe returns the relevant probe information for a specific probe being instrumented +// in a specific process +func (procs DIProcs) GetProbe(pid PID, probeID ProbeID) *Probe { + procInfo, ok := procs[pid] + if !ok { + return nil + } + return procInfo.GetProbe(probeID) +} + +// SetProbe associates instrumentation information with a probe for a specific process +func (procs DIProcs) SetProbe(pid PID, service, typeName, method string, probeID, runtimeID uuid.UUID, opts *InstrumentationOptions) { + procInfo, ok := procs[pid] + if !ok { + return + } + probe := &Probe{ + ID: probeID.String(), + ServiceName: service, + FuncName: fmt.Sprintf("%s.%s", typeName, method), + InstrumentationInfo: &InstrumentationInfo{InstrumentationOptions: opts}, + } + + procInfo.ProbesByID[probeID.String()] = probe + // TODO: remove this from here + procInfo.RuntimeID = runtimeID.String() +} + +// DeleteProbe removes instrumentation for the specified probe +// in the specified process +func (procs DIProcs) DeleteProbe(pid PID, probeID ProbeID) { + procInfo, ok := procs[pid] + if !ok { + return + } + procInfo.DeleteProbe(probeID) +} + +// CloseUprobe closes the uprobe link for the specific probe (by ID) of +// a the specified process (by PID) +func (procs DIProcs) CloseUprobe(pid PID, probeID ProbeID) { + probe := procs.GetProbe(pid, probeID) + if probe == nil { + return + } + proc, ok := procs[pid] + if !ok || proc == nil { + 
log.Info("could not close uprobe, pid not found") + } + err := proc.CloseUprobeLink(probeID) + if err != nil { + log.Infof("could not close uprobe: %s", err) + } +} + +// SetRuntimeID sets the runtime ID for the specified process +func (procs DIProcs) SetRuntimeID(pid PID, runtimeID string) { + proc, ok := procs[pid] + if !ok || proc == nil { + log.Info("could not set runtime ID, pid not found") + } + proc.RuntimeID = runtimeID +} + +// ProcessInfo represents a process, it contains the information relevant to +// dynamic instrumentation for this specific process +type ProcessInfo struct { + PID uint32 + ServiceName string + RuntimeID string + BinaryPath string + + TypeMap *TypeMap + DwarfData *dwarf.Data + + ConfigurationUprobe *link.Link + ProbesByID ProbesByID + InstrumentationUprobes map[ProbeID]*link.Link + InstrumentationObjects map[ProbeID]*ebpf.Collection +} + +// SetupConfigUprobe sets the configuration probe for the process +func (pi *ProcessInfo) SetupConfigUprobe() (*ebpf.Map, error) { + configProbe, ok := pi.ProbesByID[ConfigBPFProbeID] + if !ok { + return nil, fmt.Errorf("config probe was not set for process %s", pi.ServiceName) + } + + configLink, ok := pi.InstrumentationUprobes[ConfigBPFProbeID] + if !ok { + return nil, fmt.Errorf("config uprobe was not set for process %s", pi.ServiceName) + } + pi.ConfigurationUprobe = configLink + delete(pi.InstrumentationUprobes, ConfigBPFProbeID) + + m, ok := pi.InstrumentationObjects[configProbe.ID].Maps["events"] + if !ok { + return nil, fmt.Errorf("config ringbuffer was not set for process %s", pi.ServiceName) + } + return m, nil +} + +// CloseConfigUprobe closes the uprobe connection for the configuration probe +func (pi *ProcessInfo) CloseConfigUprobe() error { + if pi.ConfigurationUprobe != nil { + return (*pi.ConfigurationUprobe).Close() + } + return nil +} + +// SetUprobeLink associates the uprobe link with the specified probe +// in the tracked process +func (pi *ProcessInfo) SetUprobeLink(probeID ProbeID, l *link.Link) { + pi.InstrumentationUprobes[probeID] = l +} + +// CloseUprobeLink closes the probe and deletes the link for the probe +// in the tracked process +func (pi *ProcessInfo) CloseUprobeLink(probeID ProbeID) error { + if l, ok := pi.InstrumentationUprobes[probeID]; ok { + err := (*l).Close() + delete(pi.InstrumentationUprobes, probeID) + return err + } + return nil +} + +// CloseAllUprobeLinks closes all probes and deletes their links for all probes +// in the tracked process +func (pi *ProcessInfo) CloseAllUprobeLinks() { + for probeID := range pi.InstrumentationUprobes { + if err := pi.CloseUprobeLink(probeID); err != nil { + log.Info("Failed to close uprobe link for probe", pi.BinaryPath, pi.PID, probeID, err) + } + } + err := pi.CloseConfigUprobe() + if err != nil { + log.Info("Failed to close config uprobe for process", pi.BinaryPath, pi.PID, err) + } +} + +// GetProbes returns references to each probe in the associated process +func (pi *ProcessInfo) GetProbes() []*Probe { + probes := make([]*Probe, 0, len(pi.ProbesByID)) + for _, probe := range pi.ProbesByID { + probes = append(probes, probe) + } + return probes +} + +// GetProbe returns a reference to the specified probe in the associated process +func (pi *ProcessInfo) GetProbe(probeID ProbeID) *Probe { + return pi.ProbesByID[probeID] +} + +// DeleteProbe closes the uprobe link and disassociates the probe in the associated process +func (pi *ProcessInfo) DeleteProbe(probeID ProbeID) { + err := pi.CloseUprobeLink(probeID) + if err != nil { + log.Infof("could 
not close uprobe link: %s", err) + } + delete(pi.ProbesByID, probeID) +} + +// ProbesByID maps probe IDs with probes +type ProbesByID = map[ProbeID]*Probe + +// FieldIdentifier is a tuple of struct names and field names +type FieldIdentifier struct { + StructName, FieldName string +} + +// InstrumentationInfo contains information used while setting up probes +type InstrumentationInfo struct { + InstrumentationOptions *InstrumentationOptions + + // BPFParametersSourceCode is the source code needed for capturing parameters via this probe + BPFParametersSourceCode string + + // BPFSourceCode is the source code of the BPF program attached via this probe + BPFSourceCode string + + // BPFObjectFileReader is the compiled BPF program attached via this probe + BPFObjectFileReader io.ReaderAt + + ConfigurationHash string + + // Toggle for whether or not the BPF object was rebuilt after changing parameters + AttemptedRebuild bool +} + +// InstrumentationOptions is a set of options for how data should be captured by probes +type InstrumentationOptions struct { + CaptureParameters bool + ArgumentsMaxSize int + StringMaxSize int + MaxReferenceDepth int + MaxFieldCount int + SliceMaxSize int + SliceMaxLength int +} + +// Probe represents a location in a GoProgram that can be instrumented +// dynamically. It contains information about the service and the function +// associated with the probe. +type Probe struct { + // ID is a unique identifier for the probe. + ID string + + // ServiceName is the name of the service in which the probe should be placed. + ServiceName string + + // FuncName is the name of the function that triggers the probe. + FuncName string + + InstrumentationInfo *InstrumentationInfo + + RateLimiter *ratelimiter.SingleRateLimiter +} + +// GetBPFFuncName cleans the function name to be allowed by the bpf compiler +func (p *Probe) GetBPFFuncName() string { + // can't have '.', '-' or '/' in bpf program name + replacer := strings.NewReplacer(".", "_", "/", "_", "-", "_") + return replacer.Replace(p.FuncName) +} + +// ConfigPath is a remote-config specific representation which is used for retrieving probe definitions +type ConfigPath struct { + OrgID int64 + Product string + ProbeType string + ProbeUUID uuid.UUID + Hash string +} + +// ParseConfigPath takes the remote-config specific string and parses a ConfigPath object out of it +// the string is expected to be datadog///_/ +func ParseConfigPath(str string) (*ConfigPath, error) { + parts := strings.Split(str, "/") + if len(parts) != 5 { + return nil, fmt.Errorf("failed to parse config path %s", str) + } + orgIDStr, product, probeIDStr, hash := parts[1], parts[2], parts[3], parts[4] + orgID, err := strconv.ParseInt(orgIDStr, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse orgID %s (from %s)", orgIDStr, str) + } + if product != "LIVE_DEBUGGING" { + return nil, fmt.Errorf("product %s not supported (from %s)", product, str) + } + + typeAndID := strings.Split(probeIDStr, "_") + if len(typeAndID) != 2 { + return nil, fmt.Errorf("failed to parse probe type and UUID %s (from %s)", probeIDStr, str) + } + probeType, probeUUIDStr := typeAndID[0], typeAndID[1] + if probeType != "logProbe" { + return nil, fmt.Errorf("probe type %s not supported (from %s)", probeType, str) + } + probeUUID, err := uuid.Parse(probeUUIDStr) + if err != nil { + return nil, fmt.Errorf("failed to parse probeUUID %s (from %s)", probeUUIDStr, str) + } + + return &ConfigPath{ + OrgID: orgID, + Product: product, + ProbeType: probeType, + ProbeUUID: probeUUID, 
+ Hash: hash, + }, nil +} diff --git a/pkg/dynamicinstrumentation/ditypes/config_test.go b/pkg/dynamicinstrumentation/ditypes/config_test.go new file mode 100644 index 0000000000000..9fefba03063cd --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/config_test.go @@ -0,0 +1,44 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package ditypes + +import ( + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func TestParseConfigPath(t *testing.T) { + expectedUUID, err := uuid.Parse("f0b49f3e-8364-448d-97e9-3e640c4a21e6") + assert.NoError(t, err) + + configPath, err := ParseConfigPath("datadog/2/LIVE_DEBUGGING/logProbe_f0b49f3e-8364-448d-97e9-3e640c4a21e6/51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51") + assert.NoError(t, err) + + assert.NoError(t, err) + assert.Equal(t, int64(2), configPath.OrgID) + assert.Equal(t, "LIVE_DEBUGGING", configPath.Product) + assert.Equal(t, "logProbe", configPath.ProbeType) + assert.Equal(t, expectedUUID, configPath.ProbeUUID) + assert.Equal(t, "51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51", configPath.Hash) +} + +func TestParseConfigPathErrors(t *testing.T) { + tcs := []string{ + "datadog/2/LIVE_DEBUGGING/logProbe_f0b49f3e-8364-448d-97e9-3e640c4a21e6", + "datadog/2/NOT_SUPPORTED/logProbe_f0b49f3e-8364-448d-97e9-3e640c4a21e6/51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51", + "datadog/2/LIVE_DEBUGGING/notSupported_f0b49f3e-8364-448d-97e9-3e640c4a21e6/51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51", + "datadog/2/LIVE_DEBUGGING/logProbe_f0b49f3e-8364-448d-97e9-3e640c4a21e6/51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51/extra", + "datadog/2/LIVE_DEBUGGING/logProbe_f0b49f3e-xxxx-448d-97e9-3e640c4a21e6/51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51", + } + for _, tc := range tcs { + _, err := ParseConfigPath(tc) + assert.Error(t, err) + } +} diff --git a/pkg/dynamicinstrumentation/ditypes/diagnostics.go b/pkg/dynamicinstrumentation/ditypes/diagnostics.go new file mode 100644 index 0000000000000..d28764057b9ba --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/diagnostics.go @@ -0,0 +1,52 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
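
The remote-config path format handled by `ParseConfigPath` (and exercised in the test above) is `datadog/<orgID>/LIVE_DEBUGGING/logProbe_<probe UUID>/<hash>`. A short usage sketch follows, reusing the sample path from the test; the import path assumes the agent's module layout, and building it requires the `linux_bpf` tag that guards the `ditypes` package.

```go
package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
)

func main() {
	// Sample path taken from the ParseConfigPath test above.
	path := "datadog/2/LIVE_DEBUGGING/logProbe_f0b49f3e-8364-448d-97e9-3e640c4a21e6/51fed9071414a7058c2ee96fc703f3e1fa51b5bffaab6155ce5c492303882b51"

	cp, err := ditypes.ParseConfigPath(path)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	// Prints the org ID, probe UUID, and configuration hash parsed from the path.
	fmt.Printf("orgID=%d probe=%s hash=%s\n", cp.OrgID, cp.ProbeUUID, cp.Hash)
}
```
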
+ +//go:build linux_bpf + +package ditypes + +// DiagnosticUpload is the message sent to the DataDog backend conveying diagnostic information +type DiagnosticUpload struct { + Service string `json:"service"` + DDSource string `json:"ddsource"` + + Debugger struct { + Diagnostic `json:"diagnostics"` + } `json:"debugger"` +} + +// SetError sets the error in the diagnostic upload +func (d *DiagnosticUpload) SetError(errorType, errorMessage string) { + d.Debugger.Diagnostic.Status = StatusError + d.Debugger.Diagnostic.DiagnosticException = &DiagnosticException{ + Type: errorType, + Message: errorMessage, + } +} + +// Status conveys the status of a probe +type Status string + +const ( + StatusReceived Status = "RECEIVED" // StatusReceived means the probe configuration was received + StatusInstalled Status = "INSTALLED" // StatusInstalled means the probe was installed + StatusEmitting Status = "EMITTING" // StatusEmitting means the probe is emitting events + StatusError Status = "ERROR" // StatusError means the probe has an issue +) + +// Diagnostic contains fields relevant for conveying the status of a probe +type Diagnostic struct { + RuntimeID string `json:"runtimeId"` + ProbeID string `json:"probeId"` + Status Status `json:"status"` + + *DiagnosticException `json:"exception,omitempty"` +} + +// DiagnosticException is used for diagnosing errors in probes +type DiagnosticException struct { + Type string `json:"type"` + Message string `json:"message"` +} diff --git a/pkg/dynamicinstrumentation/ditypes/ebpf.go b/pkg/dynamicinstrumentation/ditypes/ebpf.go new file mode 100644 index 0000000000000..40bc53fdf9d28 --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/ebpf.go @@ -0,0 +1,17 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build ignore + +package ditypes + +/* +#include "../codegen/c/types.h" +*/ +import "C" + +type BaseEvent C.struct_base_event + +const SizeofBaseEvent = C.sizeof_struct_base_event diff --git a/pkg/dynamicinstrumentation/ditypes/ebpf_linux.go b/pkg/dynamicinstrumentation/ditypes/ebpf_linux.go new file mode 100644 index 0000000000000..d76d6c39c2105 --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/ebpf_linux.go @@ -0,0 +1,13 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs -- -I ../../network/ebpf/c -I ../../ebpf/c -fsigned-char ebpf.go + +package ditypes + +type BaseEvent struct { + Probe_id [304]byte + Pid uint32 + Uid uint32 + Program_counters [10]uint64 +} + +const SizeofBaseEvent = 0x188 diff --git a/pkg/dynamicinstrumentation/ditypes/ringbuffer.go b/pkg/dynamicinstrumentation/ditypes/ringbuffer.go new file mode 100644 index 0000000000000..3e42586dc499c --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/ringbuffer.go @@ -0,0 +1,43 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
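
Because `Diagnostic` is embedded in `DiagnosticUpload` under the `debugger`/`diagnostics` keys, a populated upload marshals to nested JSON. The sketch below constructs one and attaches an error via `SetError`; the service, runtime ID, and probe ID values are invented for the example, the output shown in the comment is approximate, and the same `linux_bpf` build-tag caveat applies.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
)

func main() {
	d := ditypes.DiagnosticUpload{
		Service:  "my-go-service", // example values, not taken from the agent
		DDSource: "dd_debugger",
	}
	d.Debugger.Diagnostic = ditypes.Diagnostic{
		RuntimeID: "example-runtime-id",
		ProbeID:   "f0b49f3e-8364-448d-97e9-3e640c4a21e6",
		Status:    ditypes.StatusReceived,
	}

	// SetError flips the status to ERROR and attaches an exception payload.
	d.SetError("ATTACH_ERROR", "could not attach uprobe")

	out, err := json.MarshalIndent(d, "", "  ")
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	// Produces JSON along the lines of:
	// {"service":"my-go-service","ddsource":"dd_debugger",
	//  "debugger":{"diagnostics":{"runtimeId":"example-runtime-id",
	//   "probeId":"f0b49f3e-...","status":"ERROR",
	//   "exception":{"type":"ATTACH_ERROR","message":"could not attach uprobe"}}}}
	fmt.Println(string(out))
}
```
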
+ +//go:build linux_bpf + +package ditypes + +import "github.com/cilium/ebpf" + +// EventsRingbuffer is the shared ringbuffer which all bpf programs use for communication +// with userspace +var EventsRingbuffer *ebpf.Map + +// DIEvent represents a single invocation of a function and it's captured information +type DIEvent struct { + ProbeID string + PID uint32 + UID uint32 + Argdata []*Param + StackPCs []uint64 +} + +// Param is the representation of a single function parameter after being parsed from +// the raw byte buffer sent from bpf +type Param struct { + ValueStr string `json:",omitempty"` + Type string + Size uint16 + Kind byte + Fields []*Param `json:",omitempty"` +} + +// StackFrame represents a single entry in a stack trace +type StackFrame struct { + FileName string `json:"fileName,omitempty"` + Function string `json:"function,omitempty"` + Line int `json:"lineNumber,omitempty"` +} + +// EventCallback is the function that is called everytime a new event is created +type EventCallback func(*DIEvent) diff --git a/pkg/dynamicinstrumentation/ditypes/snapshot.go b/pkg/dynamicinstrumentation/ditypes/snapshot.go new file mode 100644 index 0000000000000..44e7fcb35cdd2 --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/snapshot.go @@ -0,0 +1,118 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package ditypes + +import ( + "github.com/google/uuid" +) + +// SnapshotUpload is a single message sent to the datadog back containing the +// snapshot and metadata +type SnapshotUpload struct { + Service string `json:"service"` + Message string `json:"message"` + DDSource string `json:"ddsource"` + DDTags string `json:"ddtags"` + Logger struct { + Name string `json:"name"` + Method string `json:"method"` + Version int `json:"version,omitempty"` + ThreadID int `json:"thread_id,omitempty"` + ThreadName string `json:"thread_name,omitempty"` + } `json:"logger"` + + Debugger struct { + Snapshot `json:"snapshot"` + } `json:"debugger"` + + // TODO: check precision (ms, ns etc) + Duration int64 `json:"duration"` + + DD *TraceCorrelation `json:"dd,omitempty"` +} + +// Snapshot is a single instance of a function invocation and all +// captured data +type Snapshot struct { + ID *uuid.UUID `json:"id"` + Timestamp int64 `json:"timestamp"` + + Language string `json:"language"` + ProbeInSnapshot `json:"probe"` + + Captures `json:"captures"` + + Errors []EvaluationError `json:"evaluationErrors,omitempty"` + + Stack []StackFrame `json:"stack"` +} + +// Captures contains captured data at various points during a function invocation +type Captures struct { + Entry *Capture `json:"entry,omitempty"` + Return *Capture `json:"return,omitempty"` + + Lines map[string]Capture `json:"lines,omitempty"` +} + +// ProbeInSnapshot contains information about the probe that produced a snapshot +type ProbeInSnapshot struct { + ID string `json:"id"` + EvaluateAt string `json:"evaluateAt,omitempty"` + Tags string `json:"tags,omitempty"` + Version int `json:"version,omitempty"` + + ProbeLocation `json:"location"` +} + +// ProbeLocation represents where a snapshot was originally captured +type ProbeLocation struct { + Type string `json:"type,omitempty"` + Method string `json:"method,omitempty"` + Lines []string `json:"lines,omitempty"` + File string `json:"file,omitempty"` +} + +// CapturedValueMap maps 
type names to their values +type CapturedValueMap = map[string]*CapturedValue + +// Capture represents all the captured values in a snapshot +type Capture struct { + Arguments CapturedValueMap `json:"arguments,omitempty"` + Locals CapturedValueMap `json:"locals,omitempty"` +} + +// CapturedValue represents the value of a captured type +type CapturedValue struct { + Type string `json:"type"` + + // we use a string pointer so the empty string is marshalled + Value *string `json:"value,omitempty"` + + Fields map[string]*CapturedValue `json:"fields,omitempty"` + Entries [][]CapturedValue `json:"entries,omitempty"` + Elements []CapturedValue `json:"elements,omitempty"` + + NotCapturedReason string `json:"notCapturedReason,omitempty"` + IsNull bool `json:"isNull,omitempty"` + + Size string `json:"size,omitempty"` + Truncated bool `json:"truncated,omitempty"` +} + +// EvaluationError expresses why a value could not be evaluated +type EvaluationError struct { + Expr string `json:"expr"` + Message string `json:"message"` +} + +// TraceCorrelation contains fields that correlate a snapshot with traces +type TraceCorrelation struct { + TraceID string `json:"trace_id,omitempty"` + SpanID string `json:"span_id,omitempty"` +} diff --git a/pkg/dynamicinstrumentation/ditypes/snapshot_test.go b/pkg/dynamicinstrumentation/ditypes/snapshot_test.go new file mode 100644 index 0000000000000..e4496f4c6dc92 --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/snapshot_test.go @@ -0,0 +1,50 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package ditypes + +import ( + "encoding/json" + "io" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDynamicInstrumentationLogJSONRoundTrip(t *testing.T) { + files := []string{ + "testdata/snapshot-00.json", + "testdata/snapshot-01.json", + "testdata/snapshot-02.json", + } + for _, filePath := range files { + file, err := os.Open(filePath) + if err != nil { + t.Error(err) + } + defer file.Close() + + bytes, err := io.ReadAll(file) + if err != nil { + t.Error(err) + } + + var s SnapshotUpload + err = json.Unmarshal(bytes, &s) + if err != nil { + t.Error(err) + } + + mBytes, err := json.Marshal(s) + if err != nil { + t.Error(err) + } + + assert.JSONEq(t, string(bytes), string(mBytes)) + } +} diff --git a/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-00.json b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-00.json new file mode 100644 index 0000000000000..e92603672b4c6 --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-00.json @@ -0,0 +1,402 @@ +{ + "service": "debugger-backend-api-monitor", + "ddsource": "dd_debugger", + "message": "Log probe executed successfully", + "duration": 763602, + "ddtags": "tag:value", + "logger": { + "thread_id": 91, + "method": "emitSnapshot", + "thread_name": "scheduled-executor-thread-16", + "name": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob", + "version": 2 + }, + "debugger": { + "snapshot": { + "stack": [ + { + "fileName": "SnapshotReadAfterWriteMonitorJob.kt", + "function": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob.emitSnapshot", + "lineNumber": 89 + }, + { + "fileName": "SnapshotReadAfterWriteMonitorJob.kt", + "function": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob.access$emitSnapshot", + 
"lineNumber": 31 + }, + { + "fileName": "SnapshotReadAfterWriteMonitorJob.kt", + "function": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob$run$1.invoke", + "lineNumber": 63 + }, + { + "fileName": "SnapshotReadAfterWriteMonitorJob.kt", + "function": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob$run$1.invoke", + "lineNumber": 55 + }, + { + "fileName": "MonitoredTask.kt", + "function": "com.datadog.debugger.MonitoredTask$ExecutionState.run", + "lineNumber": 30 + }, + { + "fileName": "MonitoredTask.kt", + "function": "com.datadog.debugger.MonitoredTask.run", + "lineNumber": 89 + }, + { + "fileName": "SnapshotReadAfterWriteMonitorJob.kt", + "function": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob.run", + "lineNumber": 55 + }, + { + "function": "com.datadog.debugger.apimonitor.$SnapshotReadAfterWriteMonitorJob$Definition$Exec.dispatch", + "lineNumber": -1 + }, + { + "fileName": "AbstractExecutableMethodsDefinition.java", + "function": "io.micronaut.context.AbstractExecutableMethodsDefinition$DispatchedExecutableMethod.invoke", + "lineNumber": 378 + }, + { + "fileName": "DelegatingExecutableMethod.java", + "function": "io.micronaut.inject.DelegatingExecutableMethod.invoke", + "lineNumber": 76 + }, + { + "fileName": "ScheduledMethodProcessor.java", + "function": "io.micronaut.scheduling.processor.ScheduledMethodProcessor.lambda$process$5", + "lineNumber": 127 + }, + { + "fileName": "Executors.java", + "function": "java.util.concurrent.Executors$RunnableAdapter.call", + "lineNumber": 577 + }, + { + "fileName": "FutureTask.java", + "function": "java.util.concurrent.FutureTask.runAndReset", + "lineNumber": 358 + }, + { + "fileName": "ScheduledThreadPoolExecutor.java", + "function": "java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run", + "lineNumber": 305 + }, + { + "fileName": "ThreadPoolExecutor.java", + "function": "java.util.concurrent.ThreadPoolExecutor.runWorker", + "lineNumber": 1144 + }, + { + "fileName": "ThreadPoolExecutor.java", + "function": "java.util.concurrent.ThreadPoolExecutor$Worker.run", + "lineNumber": 642 + }, + { + "fileName": "Thread.java", + "function": "java.lang.Thread.run", + "lineNumber": 1589 + } + ], + "captures": { + "entry": { + "arguments": { + "apiMonitorStr": { + "type": "java.lang.String", + "value": "red" + }, + "this": { + "type": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob", + "fields": { + "apiClient": { + "type": "com.datadog.debugger.monitor.utils.DebuggerConfigurationApiClient", + "fields": { + "converter": { + "type": "com.datadog.debugger.monitor.utils.JsonApiConverter", + "fields": { + "mapper": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.ObjectMapper" + } + } + }, + "rcApiClient": { + "type": "com.datadog.debugger.monitor.utils.AuthenticatingRcApiClient", + "fields": { + "apiClient": { + "notCapturedReason": "depth", + "type": "com.datadog.debugger.monitor.utils.RcApiClient$Intercepted" + }, + "mcnultyJobConfig": { + "notCapturedReason": "depth", + "type": "java.util.LinkedHashMap" + }, + "secretManager": { + "notCapturedReason": "depth", + "type": "com.datadog.debugger.monitor.utils.VaultSecretManager" + } + } + } + } + }, + "metrics": { + "type": "com.datadog.debugger.Metrics", + "fields": { + "statsd": { + "type": "com.timgroup.statsd.NonBlockingStatsDClient", + "fields": { + "clientChannel": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.UnixDatagramClientChannel" + }, + "handler": { + 
"notCapturedReason": "depth", + "type": "com.timgroup.statsd.NonBlockingStatsDClient$1" + }, + "constantTagsRendered": { + "type": "java.lang.String", + "value": "|#version:v13250988-a801c320,env:prod,service:debugger-backend-api-monitor,dd.internal.entity_id:d1239294-1fe7-4188-9646-1bb7f59eb0b0" + }, + "statsDSender": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.StatsDSender" + }, + "telemetryClientChannel": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.UnixDatagramClientChannel" + }, + "prefix": { + "type": "java.lang.String", + "value": "dd.debugger_backend_api_monitor." + }, + "telemetryStatsDProcessor": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.StatsDNonBlockingProcessor" + }, + "blocking": { + "type": "boolean", + "value": "false" + }, + "statsDProcessor": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.StatsDNonBlockingProcessor" + }, + "telemetryStatsDSender": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.StatsDSender" + }, + "telemetry": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.Telemetry" + } + } + } + } + }, + "jobConfiguration": { + "type": "com.datadog.debugger.apimonitor.ApiMonitorJobConfigurations$SnapshotReadAfterWriteJobConfiguration", + "fields": { + "pollInterval": { + "type": "java.time.Duration", + "value": "PT1S" + }, + "orgId": { + "type": "long", + "value": "2" + }, + "timeout": { + "type": "java.time.Duration", + "value": "PT1M" + } + } + }, + "eventPlatformQueryClient": { + "type": "com.datadog.debugger.monitor.utils.EventPlatformQueryClientImpl", + "fields": { + "httpClient": { + "type": "com.datadog.debugger.monitor.utils.EventPlatformApiHttpClient$Intercepted", + "fields": { + "$interceptors": { + "notCapturedReason": "depth", + "type": "io.micronaut.aop.Interceptor[][]" + }, + "$proxyMethods": { + "notCapturedReason": "depth", + "type": "io.micronaut.inject.ExecutableMethod[]" + } + } + } + } + } + } + }, + "apiMonitorInt": { + "type": "int", + "value": "86" + } + } + }, + "return": { + "arguments": { + "apiMonitorStr": { + "type": "java.lang.String", + "value": "red" + }, + "this": { + "type": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob", + "fields": { + "apiClient": { + "type": "com.datadog.debugger.monitor.utils.DebuggerConfigurationApiClient", + "fields": { + "converter": { + "type": "com.datadog.debugger.monitor.utils.JsonApiConverter", + "fields": { + "mapper": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.ObjectMapper" + } + } + }, + "rcApiClient": { + "type": "com.datadog.debugger.monitor.utils.AuthenticatingRcApiClient", + "fields": { + "apiClient": { + "notCapturedReason": "depth", + "type": "com.datadog.debugger.monitor.utils.RcApiClient$Intercepted" + }, + "mcnultyJobConfig": { + "notCapturedReason": "depth", + "type": "java.util.LinkedHashMap" + }, + "secretManager": { + "notCapturedReason": "depth", + "type": "com.datadog.debugger.monitor.utils.VaultSecretManager" + } + } + } + } + }, + "metrics": { + "type": "com.datadog.debugger.Metrics", + "fields": { + "statsd": { + "type": "com.timgroup.statsd.NonBlockingStatsDClient", + "fields": { + "clientChannel": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.UnixDatagramClientChannel" + }, + "handler": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.NonBlockingStatsDClient$1" + }, + "constantTagsRendered": { + "type": "java.lang.String", + "value": 
"|#version:v13250988-a801c320,env:prod,service:debugger-backend-api-monitor,dd.internal.entity_id:d1239294-1fe7-4188-9646-1bb7f59eb0b0" + }, + "statsDSender": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.StatsDSender" + }, + "telemetryClientChannel": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.UnixDatagramClientChannel" + }, + "prefix": { + "type": "java.lang.String", + "value": "dd.debugger_backend_api_monitor." + }, + "telemetryStatsDProcessor": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.StatsDNonBlockingProcessor" + }, + "blocking": { + "type": "boolean", + "value": "false" + }, + "statsDProcessor": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.StatsDNonBlockingProcessor" + }, + "telemetryStatsDSender": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.StatsDSender" + }, + "telemetry": { + "notCapturedReason": "depth", + "type": "com.timgroup.statsd.Telemetry" + } + } + } + } + }, + "jobConfiguration": { + "type": "com.datadog.debugger.apimonitor.ApiMonitorJobConfigurations$SnapshotReadAfterWriteJobConfiguration", + "fields": { + "pollInterval": { + "type": "java.time.Duration", + "value": "PT1S" + }, + "orgId": { + "type": "long", + "value": "2" + }, + "timeout": { + "type": "java.time.Duration", + "value": "PT1M" + } + } + }, + "eventPlatformQueryClient": { + "type": "com.datadog.debugger.monitor.utils.EventPlatformQueryClientImpl", + "fields": { + "httpClient": { + "type": "com.datadog.debugger.monitor.utils.EventPlatformApiHttpClient$Intercepted", + "fields": { + "$interceptors": { + "notCapturedReason": "depth", + "type": "io.micronaut.aop.Interceptor[][]" + }, + "$proxyMethods": { + "notCapturedReason": "depth", + "type": "io.micronaut.inject.ExecutableMethod[]" + } + } + } + } + } + } + }, + "apiMonitorInt": { + "type": "int", + "value": "86" + } + }, + "locals": { + "uuid": { + "type": "java.lang.String", + "value": "328a0839-de9b-40fb-8c7f-f02972a0bceb" + }, + "@return": { + "type": "java.lang.String", + "value": "328a0839-de9b-40fb-8c7f-f02972a0bceb" + } + } + } + }, + "language": "java", + "id": "6e34e113-2bb3-44be-9330-79de17fab0fc", + "probe": { + "evaluateAt": "DEFAULT", + "location": { + "method": "emitSnapshot", + "type": "com.datadog.debugger.apimonitor.SnapshotReadAfterWriteMonitorJob" + }, + "id": "59e78a5b-fa9a-4056-a2bf-a4384769d1ae", + "version": 1 + }, + "timestamp": 1676045474719 + } + } +} \ No newline at end of file diff --git a/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-01.json b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-01.json new file mode 100644 index 0000000000000..2cbade7901259 --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-01.json @@ -0,0 +1,1660 @@ +{ + "service": "security-monitoring-entity-reducer", + "message": "Emitting entity with score", + "duration": 0, + "ddsource": "dd_debugger", + "ddtags": "env:prod,service:security-monitoring-entity-reducer", + "debugger": { + "snapshot": { + "stack": [ + { + "fileName": "EntityStatsOutput.java", + "function": "com.dd.logs.security_analytics.EntityStatsOutput.toByteString", + "lineNumber": 205 + }, + { + "fileName": "InternalIntakeReducerOutputEncoder.java", + "function": "com.dd.logs.rule_engine.outputs.internal_intake.InternalIntakeReducerOutputEncoder.encode", + "lineNumber": 31 + }, + { + "fileName": "InternalIntakeProducer.java", + "function": "com.dd.logs.internal_intake.producer.InternalIntakeProducer.processDatum", + "lineNumber": 116 + }, + { + "fileName": 
"WorkloadProcessor.java", + "function": "com.fsmatic.workload.WorkloadProcessor.onReceive", + "lineNumber": 332 + }, + { + "fileName": "AbstractActor.scala", + "function": "akka.actor.UntypedAbstractActor$$anonfun$receive$1.applyOrElse", + "lineNumber": 339 + }, + { + "fileName": "Actor.scala", + "function": "akka.actor.Actor.aroundReceive", + "lineNumber": 539 + }, + { + "fileName": "ActorSupport.java", + "function": "com.fsmatic.akka.ActorSupport.lambda$aroundReceive$0", + "lineNumber": 30 + }, + { + "function": "com.fsmatic.akka.ActorSupport$$Lambda/0x000000100176eaa8.accept", + "lineNumber": -1 + }, + { + "fileName": "MdcContextActor.java", + "function": "com.fsmatic.mdc.MdcContextActor.wrapReceive", + "lineNumber": 37 + }, + { + "fileName": "ActorSupport.java", + "function": "com.fsmatic.akka.ActorSupport.aroundReceive", + "lineNumber": 30 + }, + { + "fileName": "AActor.java", + "function": "com.fsmatic.akka.AActor.aroundReceive", + "lineNumber": 34 + }, + { + "fileName": "ActorCell.scala", + "function": "akka.actor.ActorCell.receiveMessage", + "lineNumber": 614 + }, + { + "fileName": "ActorCell.scala", + "function": "akka.actor.ActorCell.invoke", + "lineNumber": 583 + }, + { + "fileName": "Mailbox.scala", + "function": "akka.dispatch.Mailbox.processMailbox", + "lineNumber": 268 + }, + { + "fileName": "Mailbox.scala", + "function": "akka.dispatch.Mailbox.run", + "lineNumber": 229 + }, + { + "fileName": "Mailbox.scala", + "function": "akka.dispatch.Mailbox.exec", + "lineNumber": 241 + }, + { + "fileName": "ForkJoinTask.java", + "function": "akka.dispatch.forkjoin.ForkJoinTask.doExec", + "lineNumber": 260 + }, + { + "fileName": "ForkJoinPool.java", + "function": "akka.dispatch.forkjoin.ForkJoinPool$WorkQueue.runTask", + "lineNumber": 1339 + }, + { + "fileName": "ForkJoinPool.java", + "function": "akka.dispatch.forkjoin.ForkJoinPool.runWorker", + "lineNumber": 1979 + }, + { + "fileName": "ForkJoinWorkerThread.java", + "function": "akka.dispatch.forkjoin.ForkJoinWorkerThread.run", + "lineNumber": 107 + } + ], + "captures": { + "lines": { + "205": { + "arguments": { + "this": { + "type": "com.dd.logs.security_analytics.ImmutableEntityStatsOutput", + "fields": { + "projectionAttributes": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Map type: com.google.common.collect.RegularImmutableMap", + "type": "com.google.common.collect.RegularImmutableMap" + }, + "signalScore": { + "type": "long", + "value": "1" + }, + "logger": { + "type": "com.dd.logging.BasicLogger", + "fields": { + "metas": { + "isNull": true, + "type": "java.util.Map" + }, + "logger": { + "type": "ch.qos.logback.classic.Logger", + "fields": { + "parent": { + "notCapturedReason": "depth", + "type": "ch.qos.logback.classic.Logger" + }, + "level": { + "isNull": true, + "type": "ch.qos.logback.classic.Level" + }, + "name": { + "type": "java.lang.String", + "value": "com.dd.logs.security_analytics.EntityStatsOutput" + }, + "aai": { + "isNull": true, + "type": "ch.qos.logback.core.spi.AppenderAttachableImpl" + }, + "childrenList": { + "isNull": true, + "type": "java.util.List" + }, + "loggerContext": { + "notCapturedReason": "depth", + "type": "ch.qos.logback.classic.LoggerContext" + }, + "effectiveLevelInt": { + "type": "int", + "value": "20000" + }, + "additive": { + "type": "boolean", + "value": "true" + } + } + }, + "name": { + "type": "java.lang.String", + "value": "com.dd.logs.security_analytics.EntityStatsOutput" + } + } + }, + "count": { + "type": "long", + "value": "1" + }, + 
"projectionTags": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Map type: com.google.common.collect.SingletonImmutableBiMap", + "type": "com.google.common.collect.SingletonImmutableBiMap" + }, + "internalIntakeTimestamp": { + "type": "long", + "value": "1709233217857" + }, + "id": { + "type": "java.lang.String", + "value": "AY318mP5AAB-QSHUZlx-FQAA" + }, + "trackKey": { + "type": "com.dd.logs.Track$Key", + "fields": { + "type": { + "type": "com.dd.logs.TrackType", + "fields": { + "name": { + "type": "java.lang.String", + "value": "entitystat" + } + } + }, + "orgId": { + "type": "long", + "value": "2" + } + } + }, + "entity": { + "type": "com.dd.cloudsecurityplatform.pb.EntityOutput", + "fields": { + "id_": { + "type": "java.lang.String", + "value": "10.154.142.130" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "typeString_": { + "type": "java.lang.String", + "value": "ip_address" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + } + } + } + }, + "locals": { + "ipAttributes": { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$StringProjectionValues$Builder", + "fields": { + "unknownFieldsOrBuilder": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "size": "0", + "type": "java.util.TreeMap" + } + } + }, + "isClean": { + "type": "boolean", + "value": "false" + }, + "bitField0_": { + "type": "int", + "value": "0" + }, + "meAsParent": { + "isNull": true, + "type": "com.google.protobuf.GeneratedMessageV3$Builder$BuilderParentImpl" + }, + "value_": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.LazyStringArrayList", + "type": "com.google.protobuf.LazyStringArrayList" + }, + "builderParent": { + "isNull": true, + "type": "com.google.protobuf.GeneratedMessageV3$BuilderParent" + } + } + }, + "outputBuilder": { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$Builder", + "fields": { + "entity_": { + "type": "com.dd.cloudsecurityplatform.pb.EntityOutput", + "fields": { + "id_": { + "type": "java.lang.String", + "value": "10.154.142.130" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "typeString_": { + "type": "java.lang.String", + "value": "ip_address" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + "stringProjections_": { + "type": "com.google.protobuf.MapField", + "fields": { + "mode": { + "type": "com.google.protobuf.MapField$StorageMode", + "value": "MAP" + }, + "mapData": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Map type: com.google.protobuf.MapField$MutabilityAwareMap", + "type": "com.google.protobuf.MapField$MutabilityAwareMap" + }, + "isMutable": { + "type": "boolean", + "value": "true" + }, + "listData": { + "isNull": true, + "type": "java.util.List" + }, + "converter": { + "type": 
"com.google.protobuf.MapField$ImmutableMessageConverter", + "fields": { + "defaultEntry": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.MapEntry" + } + } + } + } + }, + "threatIntelIndicatorsMatched_": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.LazyStringArrayList", + "type": "com.google.protobuf.LazyStringArrayList" + }, + "bitField0_": { + "type": "int", + "value": "54" + }, + "entityBuilder_": { + "isNull": true, + "type": "com.google.protobuf.SingleFieldBuilderV3" + }, + "threatIntelResults_": { + "size": "9", + "elements": [ + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "15.158.54.42" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "residential_proxy" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "spur" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "199.66.15.4" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "https://spur.us" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "suspicious" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "64.252.144.155" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": 
"com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "70.132.18.132" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "130.176.185.207" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "15.158.41.133" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "130.176.135.146" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + 
"value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "3.172.1.71" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "130.176.130.132" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.UnknownFieldSet" + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + } + ], + "type": "java.util.ArrayList" + }, + "builderParent": { + "isNull": true, + "type": "com.google.protobuf.GeneratedMessageV3$BuilderParent" + }, + "unknownFieldsOrBuilder": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "size": "0", + "type": "java.util.TreeMap" + } + } + }, + "geoIpMetadataBuilder_": { + "isNull": true, + "type": "com.google.protobuf.RepeatedFieldBuilderV3" + }, + "isClean": { + "type": "boolean", + "value": "false" + }, + "count_": { + "type": "long", + "value": "1" + }, + "tagProjections_": { + "type": "com.google.protobuf.MapField", + "fields": { + "mode": { + "type": "com.google.protobuf.MapField$StorageMode", + "value": "MAP" + }, + "mapData": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Map type: com.google.protobuf.MapField$MutabilityAwareMap", + "type": "com.google.protobuf.MapField$MutabilityAwareMap" + }, + "isMutable": { + "type": "boolean", + "value": "true" + }, + "listData": { + "isNull": true, + "type": "java.util.List" + }, + "converter": { + "type": "com.google.protobuf.MapField$ImmutableMessageConverter", + "fields": { + "defaultEntry": { + "notCapturedReason": "depth", + "type": "com.google.protobuf.MapEntry" + } + } + } + } + }, + "geoIpMetadata_": { + "size": "0", + "type": "java.util.ArrayList" + }, + "ip_": { + "type": "java.lang.String", + "value": "" 
+ }, + "meAsParent": { + "isNull": true, + "type": "com.google.protobuf.GeneratedMessageV3$Builder$BuilderParentImpl" + }, + "threatIntelResultsBuilder_": { + "isNull": true, + "type": "com.google.protobuf.RepeatedFieldBuilderV3" + }, + "hosts_": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.LazyStringArrayList", + "type": "com.google.protobuf.LazyStringArrayList" + }, + "signalScore_": { + "type": "long", + "value": "1" + } + } + }, + "geoIpMetadata": { + "size": "0", + "type": "java.util.ArrayList" + }, + "hosts": { + "size": "5", + "elements": [ + { + "type": "java.lang.String", + "value": "i-02d87409e6596f562" + }, + { + "type": "java.lang.String", + "value": "i-0f42b05f770544642" + }, + { + "type": "java.lang.String", + "value": "i-0ab705684278ad06b" + }, + { + "type": "java.lang.String", + "value": "i-0218eea919deb6e1a" + }, + { + "type": "java.lang.String", + "value": "i-0405eec023d49f192" + } + ], + "type": "java.util.ArrayList" + }, + "tagProjections": { + "entries": [ + [ + { + "type": "java.lang.String", + "value": "source" + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$StringProjectionValues", + "fields": { + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "value_": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.UnmodifiableLazyStringList", + "type": "com.google.protobuf.UnmodifiableLazyStringList" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + } + ] + ], + "size": "1", + "type": "java.util.HashMap" + }, + "stringProjections": { + "entries": [ + [ + { + "type": "java.lang.String", + "value": "service" + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$StringProjectionValues", + "fields": { + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "value_": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.UnmodifiableLazyStringList", + "type": "com.google.protobuf.UnmodifiableLazyStringList" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + } + ], + [ + { + "type": "java.lang.String", + "value": "custom.usr.id" + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$StringProjectionValues", + "fields": { + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "value_": { + "size": "0", + "notCapturedReason": "java.lang.RuntimeException: Unsupported Collection type: com.google.protobuf.UnmodifiableLazyStringList", + "type": "com.google.protobuf.UnmodifiableLazyStringList" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + 
"notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + } + ] + ], + "size": "2", + "type": "java.util.HashMap" + }, + "threatIntelResults": { + "size": "9", + "elements": [ + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "15.158.54.42" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "residential_proxy" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "spur" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "199.66.15.4" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "https://spur.us" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "suspicious" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "64.252.144.155" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + 
"type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "70.132.18.132" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "130.176.185.207" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "15.158.41.133" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "130.176.135.146" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": 
"java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "3.172.1.71" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + }, + { + "type": "com.dd.cloudsecurityplatform.pb.EntityStatsPayload$ThreatIntelResult", + "fields": { + "category_": { + "type": "java.lang.String", + "value": "" + }, + "sourceName_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedHashCode": { + "type": "int", + "value": "0" + }, + "indicator_": { + "type": "java.lang.String", + "value": "130.176.130.132" + }, + "sourceUrl_": { + "type": "java.lang.String", + "value": "" + }, + "memoizedIsInitialized": { + "type": "byte", + "value": "1" + }, + "alwaysUseFieldBuilders": { + "type": "boolean", + "value": "false" + }, + "attribute_": { + "type": "java.lang.String", + "value": "" + }, + "type_": { + "type": "java.lang.String", + "value": "IP" + }, + "intention_": { + "type": "java.lang.String", + "value": "" + }, + "unknownFields": { + "type": "com.google.protobuf.UnknownFieldSet", + "fields": { + "fields": { + "notCapturedReason": "depth", + "type": "java.util.TreeMap" + } + } + }, + "memoizedSize": { + "type": "int", + "value": "-1" + } + } + } + ], + "type": "java.util.ArrayList" + } + } + } + } + }, + "language": "java", + "id": "d7141999-c5bd-4887-b855-66c7a4dbb9a4", + "probe": { + "location": { + "file": "domains/cloud-security-platform/apps/security-monitoring-entity-reducer/src/main/java/com/dd/logs/security_analytics/EntityStatsOutput.java", + "method": "toByteString", + "lines": [ + "205" + ], + "type": "com.dd.logs.security_analytics.EntityStatsOutput" + }, + "id": "13da639f-2b81-475c-9366-5aa227a07302", + "version": 1 + }, + "timestamp": 1709233217858 + } + }, + "logger": { + "thread_id": 120, + "method": "toByteString", + "thread_name": "FsmaticDataCluster-fsmatic.workload.default.work-dispatcher-42", + "name": "com.dd.logs.security_analytics.EntityStatsOutput", + "version": 2 + } +} \ No newline at end of file diff --git a/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-02.json b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-02.json new file mode 100644 index 0000000000000..2f65ebd33f8fb --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/testdata/snapshot-02.json @@ -0,0 +1,1645 @@ +{ + "service": "logs-intake-coordinator", + "message": 
"MetricsClient.parseSuccess response={Cannot find symbol: response} returns {status=ok, resType=time_series, series=..., fromDate=1709233490000, toDate=1709233550000}, ...", + "ddsource": "dd_debugger", + "duration": 2568054, + "ddtags": "git.commit.sha:3698e1f3da2142d6399ef311e80c970e8e89eb02,app:logs-intake-coordinator", + "debugger": { + "snapshot": { + "stack": [ + { + "fileName": "MetricsClient.java", + "function": "com.dd.logs.metricsclient.MetricsClient.parseSuccess", + "lineNumber": 16 + }, + { + "fileName": "AHttpServiceCall.java", + "function": "com.fsmatic.http.AHttpServiceCall.parseResponse", + "lineNumber": 203 + }, + { + "fileName": "AHttpServiceCall.java", + "function": "com.fsmatic.http.AHttpServiceCall$HttpCall.lambda$execute$1", + "lineNumber": 389 + }, + { + "function": "com.fsmatic.http.AHttpServiceCall$HttpCall$$Lambda/0x00007f8843a54460.apply", + "lineNumber": -1 + }, + { + "fileName": "CompletableFuture.java", + "function": "java.util.concurrent.CompletableFuture$UniApply.tryFire", + "lineNumber": 646 + }, + { + "fileName": "CompletableFuture.java", + "function": "java.util.concurrent.CompletableFuture.postComplete", + "lineNumber": 510 + }, + { + "fileName": "CompletableFuture.java", + "function": "java.util.concurrent.CompletableFuture.complete", + "lineNumber": 2179 + }, + { + "fileName": "CompletableCallback.java", + "function": "com.fsmatic.http.CompletableCallback.onResponse", + "lineNumber": 55 + }, + { + "fileName": "RealCall.java", + "function": "okhttp3.RealCall$AsyncCall.execute", + "lineNumber": 174 + }, + { + "fileName": "NamedRunnable.java", + "function": "okhttp3.internal.NamedRunnable.run", + "lineNumber": 32 + }, + { + "fileName": "ThreadPoolExecutor.java", + "function": "java.util.concurrent.ThreadPoolExecutor.runWorker", + "lineNumber": 1144 + }, + { + "fileName": "ThreadPoolExecutor.java", + "function": "java.util.concurrent.ThreadPoolExecutor$Worker.run", + "lineNumber": 642 + }, + { + "fileName": "Thread.java", + "function": "java.lang.Thread.runWith", + "lineNumber": 1596 + }, + { + "fileName": "Thread.java", + "function": "java.lang.Thread.run", + "lineNumber": 1583 + } + ], + "captures": { + "return": { + "arguments": { + "p0": { + "type": "okhttp3.Response", + "fields": { + "request": { + "type": "okhttp3.Request", + "fields": { + "headers": { + "type": "okhttp3.Headers", + "fields": { + "namesAndValues": { + "notCapturedReason": "depth", + "type": "java.lang.String[]" + } + } + }, + "method": { + "type": "java.lang.String", + "value": "GET" + }, + "body": { + "isNull": true, + "type": "okhttp3.RequestBody" + }, + "url": { + "type": "okhttp3.HttpUrl", + "fields": { + "password": { + "notCapturedReason": "redactedIdent", + "type": "java.lang.String" + }, + "fragment": { + "isNull": true, + "type": "java.lang.String" + }, + "scheme": { + "type": "java.lang.String", + "value": "https" + }, + "$$DD$source": { + "isNull": true, + "type": "datadog.trace.api.iast.Taintable$Source" + }, + "port": { + "type": "int", + "value": "443" + }, + "queryNamesAndValues": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "host": { + "type": "java.lang.String", + "value": "api.datad0g.com" + }, + "pathSegments": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "url": { + "size": "663", + "truncated": true, + "type": "java.lang.String", + "value": 
"https://api.datad0g.com/api/v1/query?query=timeshift%28%28max%3Akubernetes_state.statefulset.replicas_desired%7Bdatacenter%3Aus1.staging.dog%2Cdds%3Aevent-platform%2Cservice%3Alogs-intake-backend%7D%20by%20%7Bkube_stateful_set%7D%20-%20max%3Akubernetes_st" + }, + "username": { + "type": "java.lang.String", + "value": "" + } + } + }, + "cacheControl": { + "isNull": true, + "type": "okhttp3.CacheControl" + }, + "tags": { + "size": "0", + "type": "java.util.Collections$EmptyMap" + } + } + }, + "handshake": { + "type": "okhttp3.Handshake", + "fields": { + "localCertificates": { + "size": "0", + "type": "java.util.Collections$EmptyList" + }, + "peerCertificates": { + "size": "2", + "elements": [ + { + "notCapturedReason": "depth", + "type": "sun.security.x509.X509CertImpl" + }, + { + "notCapturedReason": "depth", + "type": "sun.security.x509.X509CertImpl" + } + ], + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "tlsVersion": { + "type": "okhttp3.TlsVersion", + "value": "TLS_1_2" + }, + "cipherSuite": { + "type": "okhttp3.CipherSuite", + "fields": { + "javaName": { + "type": "java.lang.String", + "value": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + } + } + } + } + }, + "headers": { + "type": "okhttp3.Headers", + "fields": { + "namesAndValues": { + "size": "26", + "elements": [ + { + "type": "java.lang.String", + "value": "date" + }, + { + "type": "java.lang.String", + "value": "Thu, 29 Feb 2024 19:05:52 GMT" + }, + { + "type": "java.lang.String", + "value": "content-type" + }, + { + "type": "java.lang.String", + "value": "application/json" + }, + { + "type": "java.lang.String", + "value": "x-frame-options" + }, + { + "type": "java.lang.String", + "value": "SAMEORIGIN" + }, + { + "type": "java.lang.String", + "value": "content-security-policy" + }, + { + "type": "java.lang.String", + "value": "frame-ancestors 'self'; report-uri https://logs.browser-intake-datadoghq.com/api/v2/logs?dd-api-key=pub293163a918901030b79492fe1ab424cf&dd-evp-origin=content-security-policy&ddsource=csp-report&ddtags=site%3Adatad0g.com" + }, + { + "type": "java.lang.String", + "value": "vary" + }, + { + "type": "java.lang.String", + "value": "Accept-Encoding" + }, + { + "type": "java.lang.String", + "value": "x-ratelimit-limit" + }, + { + "type": "java.lang.String", + "value": "100" + }, + { + "type": "java.lang.String", + "value": "x-ratelimit-period" + }, + { + "type": "java.lang.String", + "value": "10" + }, + { + "type": "java.lang.String", + "value": "x-ratelimit-remaining" + }, + { + "type": "java.lang.String", + "value": "96" + }, + { + "type": "java.lang.String", + "value": "x-ratelimit-reset" + }, + { + "type": "java.lang.String", + "value": "10" + }, + { + "type": "java.lang.String", + "value": "x-ratelimit-name" + }, + { + "type": "java.lang.String", + "value": "batch_query" + }, + { + "type": "java.lang.String", + "value": "x-content-type-options" + }, + { + "type": "java.lang.String", + "value": "nosniff" + }, + { + "type": "java.lang.String", + "value": "strict-transport-security" + }, + { + "type": "java.lang.String", + "value": "max-age=31536000; includeSubDomains; preload" + }, + { + "type": "java.lang.String", + "value": "transfer-encoding" + }, + { + "type": "java.lang.String", + "value": "chunked" + } + ], + "type": "java.lang.String[]" + } + } + }, + "code": { + "type": "int", + "value": "200" + }, + "sentRequestAtMillis": { + "type": "long", + "value": "1709233550915" + }, + "networkResponse": { + "type": "okhttp3.Response", + "fields": { + "request": { + "type": 
"okhttp3.Request", + "fields": { + "headers": { + "notCapturedReason": "depth", + "type": "okhttp3.Headers" + }, + "method": { + "type": "java.lang.String", + "value": "GET" + }, + "body": { + "isNull": true, + "type": "okhttp3.RequestBody" + }, + "url": { + "notCapturedReason": "depth", + "type": "okhttp3.HttpUrl" + }, + "cacheControl": { + "isNull": true, + "type": "okhttp3.CacheControl" + }, + "tags": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyMap" + } + } + }, + "handshake": { + "type": "okhttp3.Handshake", + "fields": { + "localCertificates": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "peerCertificates": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "tlsVersion": { + "type": "okhttp3.TlsVersion", + "value": "TLS_1_2" + }, + "cipherSuite": { + "notCapturedReason": "depth", + "type": "okhttp3.CipherSuite" + } + } + }, + "headers": { + "type": "okhttp3.Headers", + "fields": { + "namesAndValues": { + "notCapturedReason": "depth", + "type": "java.lang.String[]" + } + } + }, + "code": { + "type": "int", + "value": "200" + }, + "sentRequestAtMillis": { + "type": "long", + "value": "1709233550915" + }, + "networkResponse": { + "isNull": true, + "type": "okhttp3.Response" + }, + "message": { + "type": "java.lang.String", + "value": "OK" + }, + "body": { + "isNull": true, + "type": "okhttp3.ResponseBody" + }, + "cacheControl": { + "isNull": true, + "type": "okhttp3.CacheControl" + }, + "cacheResponse": { + "isNull": true, + "type": "okhttp3.Response" + }, + "protocol": { + "type": "okhttp3.Protocol", + "value": "HTTP_1_1" + }, + "priorResponse": { + "isNull": true, + "type": "okhttp3.Response" + }, + "receivedResponseAtMillis": { + "type": "long", + "value": "1709233552199" + }, + "exchange": { + "type": "okhttp3.internal.connection.Exchange", + "fields": { + "call": { + "notCapturedReason": "depth", + "type": "okhttp3.RealCall" + }, + "codec": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.http1.Http1ExchangeCodec" + }, + "eventListener": { + "notCapturedReason": "depth", + "type": "com.fsmatic.http.OkHttpEventLogger" + }, + "duplex": { + "type": "boolean", + "value": "false" + }, + "transmitter": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.Transmitter" + }, + "finder": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.ExchangeFinder" + } + } + } + } + }, + "message": { + "type": "java.lang.String", + "value": "OK" + }, + "body": { + "type": "okhttp3.internal.http.RealResponseBody", + "fields": { + "reader": { + "type": "okhttp3.ResponseBody$BomAwareReader", + "fields": { + "delegate": { + "notCapturedReason": "depth", + "type": "java.io.InputStreamReader" + }, + "charset": { + "notCapturedReason": "depth", + "type": "sun.nio.cs.UTF_8" + }, + "skipBuffer": { + "notCapturedReason": "java.lang.reflect.InaccessibleObjectException: Unable to make field private char[] java.io.Reader.skipBuffer accessible: module java.base does not \"opens java.io\" to unnamed module @dc24521", + "type": "char[]" + }, + "closed": { + "type": "boolean", + "value": "true" + }, + "lock": { + "notCapturedReason": "java.lang.reflect.InaccessibleObjectException: Unable to make field protected java.lang.Object java.io.Reader.lock accessible: module java.base does not \"opens java.io\" to unnamed module @dc24521", + "type": "java.lang.Object" + }, + "source": { + "notCapturedReason": "depth", + "type": "okio.RealBufferedSource" + } 
+ } + }, + "contentTypeString": { + "type": "java.lang.String", + "value": "application/json" + }, + "contentLength": { + "type": "long", + "value": "-1" + }, + "source": { + "type": "okio.RealBufferedSource", + "fields": { + "closed": { + "type": "boolean", + "value": "true" + }, + "buffer": { + "notCapturedReason": "depth", + "type": "okio.Buffer" + }, + "source": { + "notCapturedReason": "depth", + "type": "com.fsmatic.http.OkHttpMonitoringInterceptor$LengthTrackingSource" + } + } + } + } + }, + "cacheControl": { + "isNull": true, + "type": "okhttp3.CacheControl" + }, + "cacheResponse": { + "isNull": true, + "type": "okhttp3.Response" + }, + "protocol": { + "type": "okhttp3.Protocol", + "value": "HTTP_1_1" + }, + "priorResponse": { + "isNull": true, + "type": "okhttp3.Response" + }, + "receivedResponseAtMillis": { + "type": "long", + "value": "1709233552199" + }, + "exchange": { + "type": "okhttp3.internal.connection.Exchange", + "fields": { + "call": { + "type": "okhttp3.RealCall", + "fields": { + "originalRequest": { + "notCapturedReason": "depth", + "type": "okhttp3.Request" + }, + "forWebSocket": { + "type": "boolean", + "value": "false" + }, + "client": { + "notCapturedReason": "depth", + "type": "okhttp3.OkHttpClient" + }, + "executed": { + "type": "boolean", + "value": "true" + }, + "transmitter": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.Transmitter" + } + } + }, + "codec": { + "type": "okhttp3.internal.http1.Http1ExchangeCodec", + "fields": { + "sink": { + "notCapturedReason": "depth", + "type": "okio.RealBufferedSink" + }, + "client": { + "notCapturedReason": "depth", + "type": "okhttp3.OkHttpClient" + }, + "realConnection": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.RealConnection" + }, + "headerLimit": { + "type": "long", + "value": "261503" + }, + "source": { + "notCapturedReason": "depth", + "type": "okio.RealBufferedSource" + }, + "state": { + "type": "int", + "value": "6" + }, + "trailers": { + "notCapturedReason": "depth", + "type": "okhttp3.Headers" + } + } + }, + "eventListener": { + "type": "com.fsmatic.http.OkHttpEventLogger", + "fields": { + "loggers": { + "notCapturedReason": "depth", + "type": "java.util.concurrent.ConcurrentHashMap" + }, + "hostRegexps": { + "notCapturedReason": "depth", + "type": "java.util.ArrayList" + } + } + }, + "duplex": { + "type": "boolean", + "value": "false" + }, + "transmitter": { + "type": "okhttp3.internal.connection.Transmitter", + "fields": { + "request": { + "notCapturedReason": "depth", + "type": "okhttp3.Request" + }, + "noMoreExchanges": { + "type": "boolean", + "value": "true" + }, + "connectionPool": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.RealConnectionPool" + }, + "callStackTrace": { + "isNull": true, + "type": "java.lang.Object" + }, + "timeoutEarlyExit": { + "type": "boolean", + "value": "false" + }, + "exchangeResponseDone": { + "type": "boolean", + "value": "true" + }, + "timeout": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.Transmitter$1" + }, + "call": { + "notCapturedReason": "depth", + "type": "okhttp3.RealCall" + }, + "canceled": { + "type": "boolean", + "value": "false" + }, + "exchangeRequestDone": { + "type": "boolean", + "value": "true" + }, + "exchangeFinder": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.ExchangeFinder" + }, + "eventListener": { + "notCapturedReason": "depth", + "type": "com.fsmatic.http.OkHttpEventLogger" + }, + "client": { + 
"notCapturedReason": "depth", + "type": "okhttp3.OkHttpClient" + }, + "connection": { + "isNull": true, + "type": "okhttp3.internal.connection.RealConnection" + }, + "exchange": { + "isNull": true, + "type": "okhttp3.internal.connection.Exchange" + } + } + }, + "finder": { + "type": "okhttp3.internal.connection.ExchangeFinder", + "fields": { + "call": { + "notCapturedReason": "depth", + "type": "okhttp3.RealCall" + }, + "address": { + "notCapturedReason": "depth", + "type": "okhttp3.Address" + }, + "connectingConnection": { + "isNull": true, + "type": "okhttp3.internal.connection.RealConnection" + }, + "routeSelector": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.RouteSelector" + }, + "hasStreamFailure": { + "type": "boolean", + "value": "false" + }, + "eventListener": { + "notCapturedReason": "depth", + "type": "com.fsmatic.http.OkHttpEventLogger" + }, + "connectionPool": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.RealConnectionPool" + }, + "routeSelection": { + "isNull": true, + "type": "okhttp3.internal.connection.RouteSelector$Selection" + }, + "nextRouteToTry": { + "isNull": true, + "type": "okhttp3.Route" + }, + "transmitter": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.connection.Transmitter" + } + } + } + } + } + } + }, + "p1": { + "type": "com.dd.logs.metricsclient.MetricsClientActions$QueryMetric", + "fields": { + "query": { + "size": "492", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift((max:kubernetes_state.statefulset.replicas_desired{datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} by {kube_stateful_set} - max:kubernetes_state.statefulset.replicas_desired{datacenter:us1.staging.dog,dds:event-platfor" + }, + "notCritical": { + "type": "boolean", + "value": "false" + }, + "from": { + "type": "java.time.Instant", + "value": "2024-02-29T19:04:50.915679003Z" + }, + "to": { + "type": "java.time.Instant", + "value": "2024-02-29T19:05:50.915679003Z" + } + } + }, + "this": { + "type": "com.dd.logs.metricsclient.MetricsClient", + "fields": { + "shouldTrace": { + "type": "boolean", + "value": "true" + }, + "apiKey": { + "notCapturedReason": "redactedIdent", + "type": "java.lang.String" + }, + "throttledLogger": { + "type": "com.dd.logging.ThrottledLogger", + "fields": { + "logger": { + "type": "com.dd.logging.BasicLogger", + "fields": { + "metas": { + "isNull": true, + "type": "java.util.Map" + }, + "logger": { + "notCapturedReason": "depth", + "type": "ch.qos.logback.classic.Logger" + }, + "name": { + "type": "java.lang.String", + "value": "com.fsmatic.http.AHttpServiceCall" + } + } + }, + "throttler": { + "type": "com.dd.logging.throttler.ByPassThrottler", + "fields": { + "current": { + "notCapturedReason": "depth", + "type": "java.util.concurrent.atomic.AtomicReference" + }, + "rate": { + "notCapturedReason": "depth", + "type": "com.dd.logging.throttler.ByPassThrottler$Rate" + }, + "clock": { + "notCapturedReason": "depth", + "type": "java.time.Clock$SystemClock" + } + } + } + } + }, + "executor": { + "type": "com.fsmatic.rpc.RPCCallExecutor", + "fields": { + "policy": { + "type": "com.fsmatic.rpc.RPCCallExecutor$Policy$NoOp" + } + } + }, + "applicationKey": { + "type": "java.lang.String", + "value": "" + }, + "wrapper": { + "type": "com.fsmatic.http.HttpWrapper", + "fields": { + "shouldTrace": { + "type": "boolean", + "value": "true" + }, + "clientInstrumentation": { + "type": "com.fsmatic.http.OkHttpClientInstrumentation", + "fields": { + 
"dispatcherInstrumentation": { + "notCapturedReason": "depth", + "type": "com.fsmatic.http.OkHttpDispatcherInstrumentation" + } + } + }, + "client": { + "notCapturedReason": "fieldCount", + "type": "okhttp3.OkHttpClient", + "fields": { + "cache": { + "isNull": true, + "type": "okhttp3.Cache" + }, + "socketFactory": { + "notCapturedReason": "depth", + "type": "javax.net.DefaultSocketFactory" + }, + "internalCache": { + "isNull": true, + "type": "okhttp3.internal.cache.InternalCache" + }, + "hostnameVerifier": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.tls.OkHostnameVerifier" + }, + "dns": { + "notCapturedReason": "depth", + "type": "okhttp3.Dns$$Lambda/0x00007f8843706ac8" + }, + "connectionPool": { + "notCapturedReason": "depth", + "type": "okhttp3.ConnectionPool" + }, + "certificateChainCleaner": { + "notCapturedReason": "depth", + "type": "okhttp3.internal.tls.BasicCertificateChainCleaner" + }, + "certificatePinner": { + "notCapturedReason": "depth", + "type": "okhttp3.CertificatePinner" + }, + "cookieJar": { + "notCapturedReason": "depth", + "type": "okhttp3.CookieJar$1" + }, + "connectionSpecs": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "networkInterceptors": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "proxySelector": { + "notCapturedReason": "depth", + "type": "sun.net.spi.DefaultProxySelector" + }, + "proxy": { + "isNull": true, + "type": "java.net.Proxy" + }, + "sslSocketFactory": { + "notCapturedReason": "depth", + "type": "sun.security.ssl.SSLSocketFactoryImpl" + }, + "eventListenerFactory": { + "notCapturedReason": "depth", + "type": "okhttp3.EventListener$$Lambda/0x00007f88437051a8" + }, + "proxyAuthenticator": { + "notCapturedReason": "depth", + "type": "okhttp3.Authenticator$$Lambda/0x00007f88437066a8" + }, + "protocols": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "dispatcher": { + "notCapturedReason": "depth", + "type": "okhttp3.Dispatcher" + }, + "authenticator": { + "notCapturedReason": "depth", + "type": "okhttp3.Authenticator$$Lambda/0x00007f88437066a8" + }, + "interceptors": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + } + } + }, + "mapper": { + "type": "com.fasterxml.jackson.databind.ObjectMapper", + "fields": { + "_serializerFactory": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.ser.BeanSerializerFactory" + }, + "_deserializationContext": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.deser.DefaultDeserializationContext$Impl" + }, + "_deserializationConfig": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.DeserializationConfig" + }, + "_injectableValues": { + "isNull": true, + "type": "com.fasterxml.jackson.databind.InjectableValues" + }, + "_registeredModuleTypes": { + "notCapturedReason": "depth", + "type": "java.util.LinkedHashSet" + }, + "_jsonFactory": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.MappingJsonFactory" + }, + "_coercionConfigs": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.cfg.CoercionConfigs" + }, + "_subtypeResolver": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.jsontype.impl.StdSubtypeResolver" + }, + "_configOverrides": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.cfg.ConfigOverrides" + 
}, + "_serializerProvider": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.ser.DefaultSerializerProvider$Impl" + }, + "_serializationConfig": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.SerializationConfig" + }, + "_mixIns": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.introspect.SimpleMixInResolver" + }, + "_typeFactory": { + "notCapturedReason": "depth", + "type": "com.fasterxml.jackson.databind.type.TypeFactory" + }, + "_rootDeserializers": { + "notCapturedReason": "depth", + "type": "java.util.concurrent.ConcurrentHashMap" + } + } + }, + "metrics": { + "type": "com.dd.metrics.WeakRefMetricsCache", + "fields": { + "cache": { + "notCapturedReason": "depth", + "type": "com.dd.metrics.WeakRefDoubleCache" + }, + "rootRegistry": { + "notCapturedReason": "depth", + "type": "com.dd.metrics.RootMetricRegistry" + } + } + }, + "internalHttpPort": { + "type": "int", + "value": "9091" + } + } + }, + "metrics": { + "type": "com.dd.metrics.WeakRefMetricsCache", + "fields": { + "cache": { + "type": "com.dd.metrics.WeakRefDoubleCache", + "fields": { + "layer1": { + "notCapturedReason": "depth", + "type": "java.util.concurrent.ConcurrentHashMap" + } + } + }, + "rootRegistry": { + "type": "com.dd.metrics.RootMetricRegistry", + "fields": { + "metrics": { + "notCapturedReason": "depth", + "type": "java.util.concurrent.ConcurrentHashMap" + } + } + } + } + }, + "uri": { + "type": "okhttp3.HttpUrl", + "fields": { + "password": { + "notCapturedReason": "redactedIdent", + "type": "java.lang.String" + }, + "fragment": { + "isNull": true, + "type": "java.lang.String" + }, + "scheme": { + "type": "java.lang.String", + "value": "https" + }, + "$$DD$source": { + "isNull": true, + "type": "datadog.trace.api.iast.Taintable$Source" + }, + "port": { + "type": "int", + "value": "443" + }, + "queryNamesAndValues": { + "isNull": true, + "type": "java.util.List" + }, + "host": { + "type": "java.lang.String", + "value": "api.datad0g.com" + }, + "pathSegments": { + "size": "3", + "elements": [ + { + "type": "java.lang.String", + "value": "api" + }, + { + "type": "java.lang.String", + "value": "v1" + }, + { + "type": "java.lang.String", + "value": "query" + } + ], + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "url": { + "type": "java.lang.String", + "value": "https://api.datad0g.com/api/v1/query" + }, + "username": { + "type": "java.lang.String", + "value": "" + } + } + }, + "timeout": { + "type": "long", + "value": "60000" + }, + "tags": { + "type": "com.dd.metrics.Tags", + "fields": { + "hashIsZero": { + "type": "boolean", + "value": "false" + }, + "hash": { + "type": "int", + "value": "0" + }, + "tags": { + "size": "1", + "elements": [ + { + "type": "java.lang.String", + "value": "action_name:metricsclient" + } + ], + "type": "java.util.ArrayList" + } + } + } + } + } + }, + "locals": { + "@return": { + "type": "com.dd.logs.metricsclient.ImmutableQueryResponse", + "fields": { + "fromDate": { + "type": "long", + "value": "1709233490000" + }, + "resType": { + "type": "java.lang.String", + "value": "time_series" + }, + "series": { + "size": "9", + "elements": [ + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "577", + "truncated": true, + "type": "java.lang.String", + "value": 
"timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-internal-all,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_s" + }, + "metric": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-internal-all,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + "interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + }, + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "556", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-spans,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs" + }, + "metric": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-spans,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + "interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + }, + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "550", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-tcp,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-i" + }, + "metric": { + "type": "java.lang.String", + "value": 
"timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-tcp,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + "interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + }, + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "562", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-testing,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:lo" + }, + "metric": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-testing,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + "interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + }, + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "559", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-upload,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:log" + }, + "metric": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": 
"timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-upload,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + "interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + }, + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "550", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-all,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-i" + }, + "metric": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-all,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + "interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + }, + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "574", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-all-datadog,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_se" + }, + "metric": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": 
"datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-all-datadog,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + "interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + }, + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "562", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-all-rum,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:lo" + }, + "metric": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-all-rum,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + "interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + }, + { + "type": "com.dd.logs.metricsclient.ImmutableSeries", + "fields": { + "unit": { + "notCapturedReason": "depth", + "type": "java.util.Collections$EmptyList" + }, + "expression": { + "size": "553", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift(((max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-intake-backend-logs,datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} - max:kubernetes_state.statefulset.replicas_desired{kube_stateful_set:logs-" + }, + "metric": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "pointlist": { + "notCapturedReason": "depth", + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "displayName": { + "type": "java.lang.String", + "value": "timeshift(((kubernetes_state.statefulset.replicas_desired - kubernetes_state.statefulset.replicas_desired) / kubernetes_state.statefulset.replicas_desired),-60)" + }, + "scope": { + "type": "java.lang.String", + "value": "datacenter:us1.staging.dog,dds:event-platform,kube_cluster_name:oddish-b,kube_stateful_set:logs-intake-backend-logs,service:logs-intake-backend" + }, + "start": { + "type": "long", + "value": "1709233490000" + }, + "length": { + "type": "long", + "value": "16" + }, + "end": { + "type": "long", + "value": "1709233542000" + }, + 
"interval": { + "type": "long", + "value": "1" + }, + "aggr": { + "type": "java.lang.String", + "value": "max" + } + } + } + ], + "type": "java.util.Collections$UnmodifiableRandomAccessList" + }, + "toDate": { + "type": "long", + "value": "1709233550000" + }, + "query": { + "size": "492", + "truncated": true, + "type": "java.lang.String", + "value": "timeshift((max:kubernetes_state.statefulset.replicas_desired{datacenter:us1.staging.dog,dds:event-platform,service:logs-intake-backend} by {kube_stateful_set} - max:kubernetes_state.statefulset.replicas_desired{datacenter:us1.staging.dog,dds:event-platfor" + }, + "groupBy": { + "size": "1", + "elements": [ + { + "type": "java.lang.String", + "value": "kube_stateful_set" + } + ], + "type": "java.util.Collections$SingletonList" + }, + "message": { + "type": "java.lang.String", + "value": "" + }, + "status": { + "type": "java.lang.String", + "value": "ok" + } + } + } + } + } + }, + "language": "java", + "id": "97775bd9-ca14-4192-8c15-21a177819305", + "evaluationErrors": [ + { + "expr": "response", + "message": "Cannot find symbol: response" + } + ], + "probe": { + "location": { + "method": "parseSuccess", + "type": "com.dd.logs.metricsclient.MetricsClient" + }, + "id": "23a08460-521f-4364-aff5-081221aba86d", + "version": 3 + }, + "timestamp": 1709233552203 + } + }, + "logger": { + "thread_id": 18170, + "method": "parseSuccess", + "thread_name": "OkHttp https://api.datad0g.com/...", + "name": "com.dd.logs.metricsclient.MetricsClient", + "version": 2 + } +} \ No newline at end of file diff --git a/pkg/dynamicinstrumentation/ebpf/ebpf.go b/pkg/dynamicinstrumentation/ebpf/ebpf.go new file mode 100644 index 0000000000000..177dcc2146f17 --- /dev/null +++ b/pkg/dynamicinstrumentation/ebpf/ebpf.go @@ -0,0 +1,174 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux_bpf + +// Package ebpf provides utility for setting up and instrumenting the bpf code +// used by dynamic instrumentation +package ebpf + +import ( + "errors" + "fmt" + "io" + "text/template" + "time" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/link" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diagnostics" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" + "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode/runtime" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// SetupEventsMap creates the ringbuffer which all programs will use for sending output +func SetupEventsMap() error { + var err error + events, err := ebpf.NewMap(&ebpf.MapSpec{ + Name: "events", + Type: ebpf.RingBuf, + MaxEntries: 1 << 24, + }) + if err != nil { + return fmt.Errorf("could not create bpf map for sharing events with userspace: %w", err) + } + ditypes.EventsRingbuffer = events + return nil +} + +// AttachBPFUprobe attaches the probe to the specified process +func AttachBPFUprobe(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) error { + executable, err := link.OpenExecutable(procInfo.BinaryPath) + if err != nil { + diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", err.Error()) + return fmt.Errorf("could not open proc executable for attaching bpf probe: %w", err) + } + + spec, err := ebpf.LoadCollectionSpecFromReader(probe.InstrumentationInfo.BPFObjectFileReader) + if err != nil { + diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", err.Error()) + return fmt.Errorf("could not create bpf collection for probe %s: %w", probe.ID, err) + } + + mapReplacements := map[string]*ebpf.Map{} + if probe.ID != ditypes.ConfigBPFProbeID { + // config probe is special and should not be on the same ringbuffer + // as the rest of regular events. Despite having the same "events" name, + // not using the pinned map means the config program uses a different + // ringbuffer. 
+ mapReplacements["events"] = ditypes.EventsRingbuffer + } else { + configEvents, err := ebpf.NewMap(&ebpf.MapSpec{ + Type: ebpf.RingBuf, + MaxEntries: 1 << 24, + }) + if err != nil { + return fmt.Errorf("could not create bpf map for receiving probe configurations: %w", err) + } + mapReplacements["events"] = configEvents + } + + // Load the ebpf object + opts := ebpf.CollectionOptions{ + MapReplacements: mapReplacements, + } + + bpfObject, err := ebpf.NewCollectionWithOptions(spec, opts) + if err != nil { + var ve *ebpf.VerifierError + if errors.As(err, &ve) { + log.Infof("Verifier error: %+v\n", ve) + } + diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", err.Error()) + return fmt.Errorf("could not load bpf collection for probe %s: %w", probe.ID, err) + } + + procInfo.InstrumentationObjects[probe.ID] = bpfObject + + // Populate map used for zeroing out regions of memory + zeroValMap, ok := bpfObject.Maps["zeroval"] + if !ok { + diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", "could not find bpf map for zero value") + return fmt.Errorf("could not find bpf map for zero value in bpf object") + } + + var zeroSlice = make([]uint8, probe.InstrumentationInfo.InstrumentationOptions.ArgumentsMaxSize) + var index uint32 + err = zeroValMap.Update(index, zeroSlice, 0) + if err != nil { + diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", "could not use bpf map for zero value") + return fmt.Errorf("could not use bpf map for zero value in bpf object: %w", err) + } + + // Attach BPF probe to function in executable + bpfProgram, ok := bpfObject.Programs[probe.GetBPFFuncName()] + if !ok { + diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "ATTACH_ERROR", fmt.Sprintf("couldn't find bpf program for symbol %s", probe.FuncName)) + return fmt.Errorf("could not find bpf program for symbol %s", probe.FuncName) + } + + link, err := executable.Uprobe(probe.FuncName, bpfProgram, &link.UprobeOptions{ + PID: int(procInfo.PID), + }) + if err != nil { + diagnostics.Diagnostics.SetError(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, "UPROBE_FAILURE", err.Error()) + return fmt.Errorf("could not attach bpf program via uprobe: %w", err) + } + + procInfo.SetUprobeLink(probe.ID, &link) + diagnostics.Diagnostics.SetStatus(procInfo.ServiceName, procInfo.RuntimeID, probe.ID, ditypes.StatusInstalled) + + return nil +} + +// CompileBPFProgram compiles the code for a single probe associated with the process given by procInfo +func CompileBPFProgram(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) error { + f := func(in io.Reader, out io.Writer) error { + fileContents, err := io.ReadAll(in) + if err != nil { + return err + } + programTemplate, err := template.New("program_template").Parse(string(fileContents)) + if err != nil { + return err + } + err = programTemplate.Execute(out, probe) + if err != nil { + return err + } + return nil + } + + cfg := ddebpf.NewConfig() + opts := runtime.CompileOptions{ + AdditionalFlags: getCFlags(cfg), + ModifyCallback: f, + UseKernelHeaders: true, + } + compiledOutput, err := runtime.Dynamicinstrumentation.CompileWithOptions(cfg, opts) + if err != nil { + return err + } + probe.InstrumentationInfo.BPFObjectFileReader = compiledOutput + return nil +} + +func getCFlags(config *ddebpf.Config) []string { + cflags := []string{ + "-g", + "-Wno-unused-variable", + } + if config.BPFDebug { + cflags =
append(cflags, "-DDEBUG=1") + } + return cflags +} + +const ( + compilationStepTimeout = 60 * time.Second +) diff --git a/pkg/dynamicinstrumentation/eventparser/event_parser.go b/pkg/dynamicinstrumentation/eventparser/event_parser.go new file mode 100644 index 0000000000000..952a1bb435cfd --- /dev/null +++ b/pkg/dynamicinstrumentation/eventparser/event_parser.go @@ -0,0 +1,268 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +// Package eventparser is used for parsing raw bytes from bpf code into events +package eventparser + +import ( + "encoding/binary" + "fmt" + "reflect" + "unsafe" + + "golang.org/x/sys/unix" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ratelimiter" +) + +// MaxBufferSize is the maximum size of the output buffer from bpf which is read by this package +const MaxBufferSize = 10000 + +var ( + byteOrder = binary.LittleEndian +) + +// ParseEvent takes the raw buffer from bpf and parses it into an event. It also potentially +// applies a rate limit +func ParseEvent(record []byte, ratelimiters *ratelimiter.MultiProbeRateLimiter) *ditypes.DIEvent { + event := ditypes.DIEvent{} + + if len(record) < ditypes.SizeofBaseEvent { + log.Tracef("malformed event record (length %d)", len(record)) + return nil + } + baseEvent := *(*ditypes.BaseEvent)(unsafe.Pointer(&record[0])) + event.ProbeID = unix.ByteSliceToString(baseEvent.Probe_id[:]) + + allowed, _, _ := ratelimiters.AllowOneEvent(event.ProbeID) + if !allowed { + // log.Infof("event dropped by rate limit. Probe %s\t(%d dropped events out of %d)\n", + // event.ProbeID, droppedEvents, droppedEvents+successfulEvents) + return nil + } + + event.PID = baseEvent.Pid + event.UID = baseEvent.Uid + event.StackPCs = baseEvent.Program_counters[:] + event.Argdata = readParams(record[ditypes.SizeofBaseEvent:]) + return &event +} + +// ParseParams extracts just the parsed parameters from the full event record +func ParseParams(record []byte) ([]*ditypes.Param, error) { + if len(record) < 392 { + return nil, fmt.Errorf("malformed event record (length %d)", len(record)) + } + return readParams(record[392:]), nil +} + +func readParams(values []byte) []*ditypes.Param { + outputParams := []*ditypes.Param{} + for i := 0; i < MaxBufferSize; { + if i+3 >= len(values) { + break + } + paramTypeDefinition := parseTypeDefinition(values[i:]) + if paramTypeDefinition == nil { + break + } + + sizeOfTypeDefinition := countBufferUsedByTypeDefinition(paramTypeDefinition) + i += sizeOfTypeDefinition + val, numBytesRead := parseParamValue(paramTypeDefinition, values[i:]) + if reflect.Kind(val.Kind) == reflect.Slice { + // In BPF we read the slice by reading the maximum size of a slice + // that we allow, instead of just the size of the slice (which we + // know at runtime). This is to satisfy the verifier. When parsing + // here, we read just the actual slice content, but have to move the + // buffer index ahead by the amount of space used by the max read. 
+ i += ditypes.SliceMaxSize + } else { + i += numBytesRead + } + outputParams = append(outputParams, val) + } + return outputParams +} + +// parseParamValue takes the representation of the param type's definition and the +// actual values in the buffer and populates the definition with the value parsed +// from the byte buffer. It returns the resulting parameter and an indication of +// how many bytes were read from the buffer +func parseParamValue(definition *ditypes.Param, buffer []byte) (*ditypes.Param, int) { + // Start by creating a stack with each layer of the definition + // which will correspond with the layers of the values read from buffer. + // This is done using a temporary stack. + tempStack := newParamStack() + definitionStack := newParamStack() + tempStack.push(definition) + for !tempStack.isEmpty() { + current := tempStack.pop() + definitionStack.push(copyParam(current)) + for i := 0; i < len(current.Fields); i++ { + tempStack.push(current.Fields[i]) + } + } + var i int + valueStack := newParamStack() + for i = 0; i+3 < len(buffer); { + paramDefinition := definitionStack.pop() + if paramDefinition == nil { + break + } + if !isTypeWithHeader(paramDefinition.Kind) { + // This is a regular value (no sub-fields). + // We parse the value of it from the buffer and push it to the value stack + paramDefinition.ValueStr = parseIndividualValue(paramDefinition.Kind, buffer[i:i+int(paramDefinition.Size)]) + i += int(paramDefinition.Size) + valueStack.push(paramDefinition) + } else if reflect.Kind(paramDefinition.Kind) == reflect.Pointer { + // Pointers are unique in that they have their own value, and sub-fields. + // We parse the value of it from the buffer, place it in the value for + // the pointer itself, then pop the next value and place it as a sub-field. + paramDefinition.ValueStr = parseIndividualValue(paramDefinition.Kind, buffer[i:i+int(paramDefinition.Size)]) + i += int(paramDefinition.Size) + paramDefinition.Fields = append(paramDefinition.Fields, valueStack.pop()) + valueStack.push(paramDefinition) + } else { + // This is a type with sub-fields which have already been parsed and push + // onto the value stack. We pop those and set them as fields in this type. + // We then push this type onto the value stack as it may also be a sub-field. + // In header types like this, paramDefinition.Size corresponds with the number of + // fields under it. + for n := 0; n < int(paramDefinition.Size); n++ { + paramDefinition.Fields = append([]*ditypes.Param{valueStack.pop()}, paramDefinition.Fields...) + } + valueStack.push(paramDefinition) + } + } + return valueStack.pop(), i +} + +func copyParam(p *ditypes.Param) *ditypes.Param { + return &ditypes.Param{ + Type: p.Type, + Kind: p.Kind, + Size: p.Size, + } +} + +func parseKindToString(kind byte) string { + if kind == 255 { + return "Unsupported" + } else if kind == 254 { + return "reached field limit" + } + + return reflect.Kind(kind).String() +} + +// parseTypeDefinition is given a buffer which contains the header type definition +// for basic/complex types, and the actual content of those types. 
+// It returns a fully populated tree of `ditypes.Param` which will be used for parsing +// the actual values +func parseTypeDefinition(b []byte) *ditypes.Param { + stack := newParamStack() + i := 0 + for { + if len(b) < 3 { + return nil + } + newParam := &ditypes.Param{ + Kind: b[i], + Size: binary.LittleEndian.Uint16(b[i+1 : i+3]), + Type: parseKindToString(b[i]), + } + if newParam.Kind == 0 && newParam.Size == 0 { + break + } + i += 3 + if isTypeWithHeader(newParam.Kind) { + stack.push(newParam) + continue + } + + stackCheck: + if stack.isEmpty() { + return newParam + } + top := stack.peek() + top.Fields = append(top.Fields, newParam) + if len(top.Fields) == int(top.Size) || + (reflect.Kind(top.Kind) == reflect.Pointer && len(top.Fields) == 1) { + newParam = stack.pop() + goto stackCheck + } + + } + return nil +} + +// countBufferUsedByTypeDefinition is used to determine that amount of bytes +// that were used to read the type definition. Each individual element of the +// definition uses 3 bytes (1 for kind, 2 for size). This is a needed calculation +// so we know where we should read the actual values in the buffer. +func countBufferUsedByTypeDefinition(root *ditypes.Param) int { + queue := []*ditypes.Param{root} + counter := 0 + for len(queue) != 0 { + front := queue[0] + queue = queue[1:] + counter += 3 + queue = append(queue, front.Fields...) + } + return counter +} + +func isTypeWithHeader(pieceType byte) bool { + return reflect.Kind(pieceType) == reflect.Struct || + reflect.Kind(pieceType) == reflect.Slice || + reflect.Kind(pieceType) == reflect.Array || + reflect.Kind(pieceType) == reflect.Pointer +} + +func parseIndividualValue(paramType byte, paramValueBytes []byte) string { + switch reflect.Kind(paramType) { + case reflect.Uint8: + return fmt.Sprintf("%d", uint8(paramValueBytes[0])) + case reflect.Int8: + return fmt.Sprintf("%d", int8(paramValueBytes[0])) + case reflect.Uint16: + return fmt.Sprintf("%d", byteOrder.Uint16(paramValueBytes)) + case reflect.Int16: + return fmt.Sprintf("%d", int16(byteOrder.Uint16(paramValueBytes))) + case reflect.Uint32: + return fmt.Sprintf("%d", byteOrder.Uint32(paramValueBytes)) + case reflect.Int32: + return fmt.Sprintf("%d", int32(byteOrder.Uint32(paramValueBytes))) + case reflect.Uint64: + return fmt.Sprintf("%d", byteOrder.Uint64(paramValueBytes)) + case reflect.Int64: + return fmt.Sprintf("%d", int64(byteOrder.Uint64(paramValueBytes))) + case reflect.Uint: + return fmt.Sprintf("%d", byteOrder.Uint64(paramValueBytes)) + case reflect.Int: + return fmt.Sprintf("%d", int(byteOrder.Uint64(paramValueBytes))) + case reflect.Pointer: + return fmt.Sprintf("0x%X", byteOrder.Uint64(paramValueBytes)) + case reflect.String: + return string(paramValueBytes) + case reflect.Bool: + if paramValueBytes[0] == 1 { + return "true" + } else { + return "false" + } + case ditypes.KindUnsupported: + return "UNSUPPORTED" + default: + return "" + } +} diff --git a/pkg/dynamicinstrumentation/eventparser/event_parser_test.go b/pkg/dynamicinstrumentation/eventparser/event_parser_test.go new file mode 100644 index 0000000000000..94496b5cd2d0f --- /dev/null +++ b/pkg/dynamicinstrumentation/eventparser/event_parser_test.go @@ -0,0 +1,298 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
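The parser above only ever sees raw byte slices; how those bytes arrive is worth a short illustration. Below is a rough sketch (not part of this PR) of a consumer loop that feeds ParseEvent from the ring buffer created by SetupEventsMap. It assumes github.com/cilium/ebpf/ringbuf as the reader; the rate limiter is passed in rather than constructed because its constructor is not shown in this section.

package sketch

import (
	"errors"
	"log"

	"github.com/cilium/ebpf/ringbuf"

	"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
	"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/eventparser"
	"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ratelimiter"
)

// consume is a hypothetical loop that reads raw records from the shared
// "events" ring buffer and hands them to the event parser. Error handling
// and shutdown are intentionally minimal.
func consume(rl *ratelimiter.MultiProbeRateLimiter, out chan<- *ditypes.DIEvent) error {
	reader, err := ringbuf.NewReader(ditypes.EventsRingbuffer)
	if err != nil {
		return err
	}
	defer reader.Close()
	for {
		record, err := reader.Read()
		if errors.Is(err, ringbuf.ErrClosed) {
			return nil // ring buffer was closed, stop consuming
		}
		if err != nil {
			log.Printf("ringbuf read: %v", err)
			continue
		}
		// ParseEvent returns nil for malformed or rate-limited records.
		if e := eventparser.ParseEvent(record.RawSample, rl); e != nil {
			out <- e
		}
	}
}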
+ +//go:build linux_bpf + +package eventparser + +import ( + "fmt" + "reflect" + "testing" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" +) + +func TestCountBufferUsedByTypeDefinition(t *testing.T) { + tests := []struct { + name string + param *ditypes.Param + expected int + }{ + { + name: "Struct with nested structs and ints", + param: &ditypes.Param{ + Kind: byte(reflect.Struct), + Size: 2, + Fields: []*ditypes.Param{ + {Kind: byte(reflect.Struct), Size: 2, Fields: []*ditypes.Param{ + {Kind: byte(reflect.Int), Size: 8}, + {Kind: byte(reflect.Int), Size: 8}, + }}, + {Kind: byte(reflect.Int), Size: 8}, + }, + }, + expected: 15, + }, + { + name: "Complex nested structure", + param: &ditypes.Param{ + Type: "slice", Size: 0x2, Kind: 0x17, + Fields: []*ditypes.Param{ + {Type: "struct", Size: 0x2, Kind: 0x19, Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "struct", Size: 0x2, Kind: 0x19, Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + }}, + }}, + }, + }, + expected: 18, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := countBufferUsedByTypeDefinition(tt.param) + if result != tt.expected { + t.Errorf("Expected %d, got %d", tt.expected, result) + } + }) + } +} + +func TestParseParamValue(t *testing.T) { + tests := []struct { + name string + inputBuffer []byte + inputDefinition *ditypes.Param + expectedValue *ditypes.Param + }{ + { + name: "Basic slice of structs", + inputBuffer: []byte{ + 1, 2, 0, 3, 0, 0, 0, // Content of slice element 1 + 4, 5, 0, 6, 0, 0, 0, // Content of slice element 2 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // Extra padding + }, + inputDefinition: &ditypes.Param{ + Type: "slice", Size: 0x2, Kind: 0x17, + Fields: []*ditypes.Param{ + {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint16", Size: 0x2, Kind: 0x9}, + {Type: "uint32", Size: 0x4, Kind: 0xa}, + }}, + {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint16", Size: 0x2, Kind: 0x9}, + {Type: "uint32", Size: 0x4, Kind: 0xa}, + }}, + }, + }, + expectedValue: &ditypes.Param{ + Type: "slice", Size: 0x2, Kind: 0x17, + Fields: []*ditypes.Param{ + {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{ + {ValueStr: "1", Type: "uint8", Size: 0x1, Kind: 0x8}, + {ValueStr: "2", Type: "uint16", Size: 0x2, Kind: 0x9}, + {ValueStr: "3", Type: "uint32", Size: 0x4, Kind: 0xa}, + }}, + {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{ + {ValueStr: "4", Type: "uint8", Size: 0x1, Kind: 0x8}, + {ValueStr: "5", Type: "uint16", Size: 0x2, Kind: 0x9}, + {ValueStr: "6", Type: "uint32", Size: 0x4, Kind: 0xa}, + }}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + val, _ := parseParamValue(tt.inputDefinition, tt.inputBuffer) + if !reflect.DeepEqual(val, tt.expectedValue) { + t.Errorf("Parsed incorrectly! 
Got %+v, expected %+v", val, tt.expectedValue) + } + }) + } +} + +func TestReadParams(t *testing.T) { + tests := []struct { + name string + inputBuffer []byte + expectedResult []*ditypes.Param + }{ + { + name: "Basic slice of structs", + inputBuffer: []byte{ + 23, 2, 0, // Slice with 2 elements + 25, 3, 0, // Slice elements are each a struct with 3 fields + 8, 1, 0, // Struct field 1 is a uint8 (size 1) + 9, 2, 0, // Struct field 2 is a uint16 (size 2) + 8, 1, 0, // Struct field 3 is a uint8 (size 1) + 25, 3, 0, // Slice elements are each a struct with 3 fields + 8, 1, 0, // Struct field 1 is a uint8 (size 1) + 9, 2, 0, // Struct field 2 is a uint16 (size 2) + 8, 1, 0, // Struct field 3 is a uint8 (size 1) + 1, 2, 0, 3, // Content of slice element 1 (not relevant for this function) + 4, 5, 0, 6, // Content of slice element 2 (not relevant for this function) + // Padding + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, + }, + expectedResult: []*ditypes.Param{{ + Type: "slice", Size: 0x2, Kind: 0x17, + Fields: []*ditypes.Param{ + {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{ + {ValueStr: "1", Type: "uint8", Size: 0x1, Kind: 0x8}, + {ValueStr: "2", Type: "uint16", Size: 0x2, Kind: 0x9}, + {ValueStr: "3", Type: "uint8", Size: 0x1, Kind: 0x8}, + }}, + {Type: "struct", Size: 0x3, Kind: 0x19, Fields: []*ditypes.Param{ + {ValueStr: "4", Type: "uint8", Size: 0x1, Kind: 0x8}, + {ValueStr: "5", Type: "uint16", Size: 0x2, Kind: 0x9}, + {ValueStr: "6", Type: "uint8", Size: 0x1, Kind: 0x8}, + }}, + }, + }}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + output := readParams(tt.inputBuffer) + if !reflect.DeepEqual(output, tt.expectedResult) { + fmt.Printf("Got: %v\n", output) + fmt.Printf("Expected: %v\n", tt.expectedResult) + t.Errorf("Didn't read correctly!") + } + }) + } +} +func TestParseTypeDefinition(t *testing.T) { + tests := []struct { + name string + inputBuffer []byte + expectedResult *ditypes.Param + }{ + { + name: "Slice of structs with uint8 and uint16 fields", + inputBuffer: []byte{ + 23, 2, 0, // Slice with 2 elements + + 25, 3, 0, // Slice elements are each a struct with 3 fields + + 8, 1, 0, // Struct field 1 is a uint8 (size 1) + 9, 2, 0, // Struct field 2 is a uint16 (size 2) + 8, 1, 0, // Struct field 3 is a uint8 (size 1) + + 25, 3, 0, // Slice elements are each a struct with 3 fields + + 8, 1, 0, // Struct field 1 is a uint8 (size 1) + 9, 2, 0, // Struct field 2 is a uint16 (size 2) + 8, 1, 0, // Struct field 3 is a uint8 (size 1) + + // Padding + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + }, + expectedResult: &ditypes.Param{ + Type: "slice", Size: 0x2, Kind: 0x17, + Fields: []*ditypes.Param{ + { + Type: "struct", Size: 0x3, Kind: 0x19, + Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint16", Size: 0x2, Kind: 0x9}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + }, + }, + { + Type: "struct", Size: 0x3, Kind: 0x19, + Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint16", Size: 0x2, Kind: 0x9}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + }, + }, + }, + }, + }, + { + name: "Nested struct fields", + inputBuffer: []byte{ + 23, 2, 0, // Slice with 2 elements + 25, 4, 0, // Slice elements are each a struct with 2 fields + 8, 1, 0, // Struct field 1 is a uint8 (size 1) + 8, 1, 0, // Struct field 2 is a uint8 (size 
1) + 8, 1, 0, // Struct field 3 is a uint8 (size 1) + 25, 2, 0, // Struct field 4 is a struct with 2 fields + 8, 1, 0, // Nested struct field 1 is a uint8 (size 1) + 8, 1, 0, // Nested struct field 2 is a uint8 (size 1) + 25, 4, 0, // Slice elements are each a struct with 2 fields + 8, 1, 0, // Struct field 1 is a uint8 (size 1) + 8, 1, 0, // Struct field 2 is a uint8 (size 1) + 8, 1, 0, // Struct field 3 is a uint8 (size 1) + 25, 2, 0, // Struct field 4 is a struct with 2 fields + 8, 1, 0, // Nested struct field 1 is a uint8 (size 1) + 8, 1, 0, // Nested struct field 2 is a uint8 (size 1) + 1, 2, 3, // Content of slice element 1 (top-level uint8, then 2 second tier uint8s) + 4, 5, 6, // Content of slice element 2 (top-level uint8, then 2 second tier uint8s) + // Padding + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + }, + expectedResult: &ditypes.Param{ + Type: "slice", Size: 0x2, Kind: 0x17, + Fields: []*ditypes.Param{ + { + Type: "struct", Size: 0x4, Kind: 0x19, + Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + { + Type: "struct", Size: 0x2, Kind: 0x19, + Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + }, + }, + }, + }, + { + Type: "struct", Size: 0x4, Kind: 0x19, + Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + { + Type: "struct", Size: 0x2, Kind: 0x19, + Fields: []*ditypes.Param{ + {Type: "uint8", Size: 0x1, Kind: 0x8}, + {Type: "uint8", Size: 0x1, Kind: 0x8}, + }, + }, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + typeDefinition := parseTypeDefinition(tt.inputBuffer) + if !reflect.DeepEqual(typeDefinition, tt.expectedResult) { + fmt.Printf("%v\n", typeDefinition) + fmt.Printf("%v\n", tt.expectedResult) + t.Errorf("Not equal!") + } + }) + } +} diff --git a/pkg/dynamicinstrumentation/eventparser/param_stack.go b/pkg/dynamicinstrumentation/eventparser/param_stack.go new file mode 100644 index 0000000000000..b2359951ca25a --- /dev/null +++ b/pkg/dynamicinstrumentation/eventparser/param_stack.go @@ -0,0 +1,45 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux_bpf + +package eventparser + +import ( + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" +) + +type paramStack struct { + arr []*ditypes.Param +} + +func newParamStack() *paramStack { + s := paramStack{arr: []*ditypes.Param{}} + return &s +} + +func (s *paramStack) isEmpty() bool { + return len(s.arr) == 0 +} + +func (s *paramStack) pop() *ditypes.Param { + if s.isEmpty() { + return nil + } + top := s.peek() + s.arr = s.arr[0 : len(s.arr)-1] + return top +} + +func (s *paramStack) peek() *ditypes.Param { + if s.isEmpty() { + return nil + } + return s.arr[len(s.arr)-1] +} + +func (s *paramStack) push(p *ditypes.Param) { + s.arr = append(s.arr, p) +} diff --git a/pkg/dynamicinstrumentation/config.go b/pkg/dynamicinstrumentation/module/config.go similarity index 96% rename from pkg/dynamicinstrumentation/config.go rename to pkg/dynamicinstrumentation/module/config.go index 8265cf8d5a3f1..fa8c7530d2242 100644 --- a/pkg/dynamicinstrumentation/config.go +++ b/pkg/dynamicinstrumentation/module/config.go @@ -3,7 +3,9 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package dynamicinstrumentation +//go:build linux_bpf + +package module import ( "github.com/DataDog/datadog-agent/cmd/system-probe/config" diff --git a/pkg/dynamicinstrumentation/module/doc.go b/pkg/dynamicinstrumentation/module/doc.go new file mode 100644 index 0000000000000..145cc294d401c --- /dev/null +++ b/pkg/dynamicinstrumentation/module/doc.go @@ -0,0 +1,11 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +// Package module encapsulates a system-probe module which uses uprobes and bpf +// to exfiltrate data from running processes. This is the Go implementation of +// the dynamic instrumentation product. +package module diff --git a/pkg/dynamicinstrumentation/module/module.go b/pkg/dynamicinstrumentation/module/module.go new file mode 100644 index 0000000000000..c5cbfced2b919 --- /dev/null +++ b/pkg/dynamicinstrumentation/module/module.go @@ -0,0 +1,73 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
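One usage note on the stack above, since it is unexported: it is a plain LIFO, which is exactly what parseParamValue relies on when it flattens a type definition and then rebuilds values from the innermost fields outward. A small illustrative test of that ordering (not part of this PR; it would have to live inside package eventparser) could look like this:

//go:build linux_bpf

package eventparser

import (
	"testing"

	"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
)

// TestParamStackOrder documents the LIFO behavior of paramStack (illustrative only).
func TestParamStackOrder(t *testing.T) {
	s := newParamStack()
	if !s.isEmpty() || s.pop() != nil || s.peek() != nil {
		t.Fatal("a new stack should be empty and tolerate pop/peek")
	}
	s.push(&ditypes.Param{Type: "struct"})
	s.push(&ditypes.Param{Type: "uint8"})
	if got := s.pop().Type; got != "uint8" { // last pushed, first popped
		t.Fatalf("expected uint8 on top, got %s", got)
	}
	if got := s.peek().Type; got != "struct" { // peek does not remove
		t.Fatalf("expected struct below, got %s", got)
	}
	if s.pop().Type != "struct" || !s.isEmpty() {
		t.Fatal("stack should be empty after both elements are popped")
	}
}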
+ +//go:build linux_bpf + +package module + import ( + "net/http" + + "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" + "github.com/DataDog/datadog-agent/cmd/system-probe/utils" + coreconfig "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/util/log" + + di "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation" +) + +// Module is the dynamic instrumentation system probe module +type Module struct { + godi *di.GoDI +} + +// NewModule creates a new dynamic instrumentation system probe module +func NewModule(config *Config) (*Module, error) { + godi, err := di.RunDynamicInstrumentation(&di.DIOptions{ + Offline: coreconfig.SystemProbe().GetBool("dynamic_instrumentation.offline_mode"), + ProbesFilePath: coreconfig.SystemProbe().GetString("dynamic_instrumentation.probes_file_path"), + SnapshotOutput: coreconfig.SystemProbe().GetString("dynamic_instrumentation.snapshot_output_file_path"), + DiagnosticOutput: coreconfig.SystemProbe().GetString("dynamic_instrumentation.diagnostics_output_file_path"), + }) + if err != nil { + return nil, err + } + return &Module{godi}, nil +} + +// Close disables the dynamic instrumentation system probe module +func (m *Module) Close() { + if m.godi == nil { + log.Info("Could not close dynamic instrumentation module, already closed") + return + } + log.Info("Closing dynamic instrumentation module") + m.godi.Close() +} + +// GetStats returns a map of various metrics about the state of the module +func (m *Module) GetStats() map[string]interface{} { + if m == nil || m.godi == nil { + log.Info("Could not get stats from dynamic instrumentation module, closed") + return map[string]interface{}{} + } + debug := map[string]interface{}{} + stats := m.godi.GetStats() + debug["PIDEventsCreated"] = stats.PIDEventsCreatedCount + debug["ProbeEventsCreated"] = stats.ProbeEventsCreatedCount + return debug +} + +// Register creates a health check endpoint for the dynamic instrumentation module +func (m *Module) Register(httpMux *module.Router) error { + httpMux.HandleFunc("/check", utils.WithConcurrencyLimit(utils.DefaultMaxConcurrentRequests, + func(w http.ResponseWriter, req *http.Request) { + stats := []string{} + utils.WriteAsJSON(w, stats) + })) + + log.Info("Registering dynamic instrumentation module") + return nil +} diff --git a/pkg/dynamicinstrumentation/module_linux.go b/pkg/dynamicinstrumentation/module/module_stub.go similarity index 55% rename from pkg/dynamicinstrumentation/module_linux.go rename to pkg/dynamicinstrumentation/module/module_stub.go index 193e8a90646ac..83956088c9466 100644 --- a/pkg/dynamicinstrumentation/module_linux.go +++ b/pkg/dynamicinstrumentation/module/module_stub.go @@ -3,34 +3,44 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package dynamicinstrumentation +//go:build !linux_bpf + +// Package module provides the dynamic instrumentation module. This is a stub, meaning +// this file provides empty implementations used when the target platform does not support +// the features required by dynamic instrumentation.
+package module import ( "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" - "github.com/DataDog/datadog-agent/pkg/util/log" + sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" ) //nolint:revive // TODO(DEBUG) Fix revive linter -type Module struct{} +type Config struct{} //nolint:revive // TODO(DEBUG) Fix revive linter -func NewModule(config *Config) (*Module, error) { - return &Module{}, nil +func NewConfig(_ *sysconfigtypes.Config) (*Config, error) { + return &Config{}, nil } //nolint:revive // TODO(DEBUG) Fix revive linter -func (m *Module) Close() { - log.Info("Closing user tracer module") +type Module struct { } +//nolint:revive // TODO(DEBUG) Fix revive linter +func NewModule(config *Config) (*Module, error) { + return nil, nil +} + +//nolint:revive // TODO(DEBUG) Fix revive linter +func (m *Module) Close() {} + //nolint:revive // TODO(DEBUG) Fix revive linter func (m *Module) GetStats() map[string]interface{} { - debug := map[string]interface{}{} - return debug + return nil } //nolint:revive // TODO(DEBUG) Fix revive linter func (m *Module) Register(_ *module.Router) error { - log.Info("Registering dynamic instrumentation module") return nil } diff --git a/pkg/dynamicinstrumentation/proctracker/proctracker.go b/pkg/dynamicinstrumentation/proctracker/proctracker.go new file mode 100644 index 0000000000000..f03d86c17efaa --- /dev/null +++ b/pkg/dynamicinstrumentation/proctracker/proctracker.go @@ -0,0 +1,251 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +// Package proctracker provides a facility for Dynamic Instrumentation to discover +// and track the lifecycle of processes running on the same host +package proctracker + +import ( + "debug/elf" + "errors" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/link" + + "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" + "github.com/DataDog/datadog-agent/pkg/network/go/binversion" + "github.com/DataDog/datadog-agent/pkg/process/monitor" + "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/util/kernel" + "golang.org/x/sys/unix" +) + +type processTrackerCallback func(ditypes.DIProcs) + +// ProcessTracker is adapted from https://github.com/DataDog/datadog-agent/blob/main/pkg/network/protocols/http/ebpf_gotls.go +type ProcessTracker struct { + procRoot string + lock sync.RWMutex + pm *monitor.ProcessMonitor + processes processes + binaries binaries + callback processTrackerCallback + unsubscribe []func() +} + +// NewProcessTracker creates a new ProcessTracker +func NewProcessTracker(callback processTrackerCallback) *ProcessTracker { + pt := ProcessTracker{ + pm: monitor.GetProcessMonitor(), + procRoot: kernel.ProcFSRoot(), + callback: callback, + binaries: make(map[binaryID]*runningBinary), + processes: make(map[pid]binaryID), + } + return &pt +} + +// Start subscribes to exec and exit events so dynamic instrumentation can be made +// aware of new processes that may need to be instrumented, and of instrumented processes +// that should no longer be instrumented +func
(pt *ProcessTracker) Start() error { + + unsubscribeExec := pt.pm.SubscribeExec(pt.handleProcessStart) + unsubscribeExit := pt.pm.SubscribeExit(pt.handleProcessStop) + + pt.unsubscribe = append(pt.unsubscribe, unsubscribeExec) + pt.unsubscribe = append(pt.unsubscribe, unsubscribeExit) + + err := pt.pm.Initialize(false) + if err != nil { + return err + } + + return nil +} + +// Stop unsubscribes from exec and exit events +func (pt *ProcessTracker) Stop() { + for _, unsubscribe := range pt.unsubscribe { + unsubscribe() + } +} + +func (pt *ProcessTracker) handleProcessStart(pid uint32) { + exePath := filepath.Join(pt.procRoot, strconv.FormatUint(uint64(pid), 10), "exe") + + go pt.inspectBinary(exePath, pid) +} + +func (pt *ProcessTracker) handleProcessStop(pid uint32) { + pt.unregisterProcess(pid) +} + +func (pt *ProcessTracker) inspectBinary(exePath string, pid uint32) { + serviceName := getServiceName(pid) + if serviceName == "" { + // if the expected env vars are not set we don't inspect the binary + return + } + log.Info("Found instrumentation candidate", serviceName) + // binPath, err := os.Readlink(exePath) + // if err != nil { + // // /proc could be slow to update so we retry a few times + // end := time.Now().Add(10 * time.Millisecond) + // for end.After(time.Now()) { + // binPath, err = os.Readlink(exePath) + // if err == nil { + // break + // } + // time.Sleep(time.Millisecond) + // } + // } + // if err != nil { + // // we can't access the binary path here (pid probably ended already) + // // there is not much we can do, and we don't want to flood the logs + // log.Infof("cannot follow link %s -> %s, %s", exePath, binPath, err) + // // in docker, following the symlink does not work, but we can open the file in /proc + // // if we can't follow the symlink we try to open /proc directly + // // TODO: validate this approach + // binPath = exePath + // } + + // TODO: switch to using exePath for the demo, use conditional logic above moving forward + binPath := exePath + f, err := os.Open(exePath) + if err != nil { + // this should be a debug log, but we want to know if this happens + log.Infof("could not open file %s, %s", binPath, err) + return + } + defer f.Close() + + elfFile, err := elf.NewFile(f) + if err != nil { + log.Infof("file %s could not be parsed as an ELF file: %s", binPath, err) + return + } + + noFuncs := make(map[string]bininspect.FunctionConfiguration) + noStructs := make(map[bininspect.FieldIdentifier]bininspect.StructLookupFunction) + _, err = bininspect.InspectNewProcessBinary(elfFile, noFuncs, noStructs) + if errors.Is(err, binversion.ErrNotGoExe) { + return + } + if err != nil { + log.Infof("error reading exe: %s", err) + return + } + + var stat syscall.Stat_t + if err = syscall.Stat(binPath, &stat); err != nil { + log.Infof("could not stat binary path %s: %s", binPath, err) + return + } + binID := binaryID{ + Id_major: unix.Major(stat.Dev), + Id_minor: unix.Minor(stat.Dev), + Ino: stat.Ino, + } + pt.registerProcess(binID, pid, stat.Mtim, binPath, serviceName) +} + +func (pt *ProcessTracker) registerProcess(binID binaryID, pid pid, mTime syscall.Timespec, binaryPath string, serviceName string) { + pt.lock.Lock() + defer pt.lock.Unlock() + + pt.processes[pid] = binID + if bin, ok := pt.binaries[binID]; ok { + // process that uses this binary already exists + bin.processCount++ + } else { + + pt.binaries[binID] = &runningBinary{ + binID: binID, + mTime: mTime, + processCount: 1, + binaryPath: binaryPath, + serviceName: serviceName, + } + } + state := pt.currentState() 
+ pt.callback(state) +} + +func getServiceName(pid uint32) string { + envVars, _, err := utils.EnvVars([]string{"DD"}, pid, model.MaxArgsEnvsSize) + if err != nil { + return "" + } + + serviceName := "" + diEnabled := false + for _, envVar := range envVars { + parts := strings.SplitN(envVar, "=", 2) + if len(parts) == 2 && parts[0] == "DD_SERVICE" { + serviceName = parts[1] + } + if len(parts) == 2 && parts[0] == "DD_DYNAMIC_INSTRUMENTATION_ENABLED" { + diEnabled = parts[1] == "true" + } + } + + if !diEnabled { + return "" + } + return serviceName +} + +func (pt *ProcessTracker) unregisterProcess(pid pid) { + pt.lock.Lock() + defer pt.lock.Unlock() + + binID, ok := pt.processes[pid] + if !ok { + return + } + delete(pt.processes, pid) + + bin, ok := pt.binaries[binID] + if !ok { + return + } + bin.processCount-- + if bin.processCount == 0 { + delete(pt.binaries, binID) + state := pt.currentState() + pt.callback(state) + } +} + +func (pt *ProcessTracker) currentState() map[ditypes.PID]*ditypes.ProcessInfo { + state := make(map[ditypes.PID]*ditypes.ProcessInfo) + + for pid, binID := range pt.processes { + bin := pt.binaries[binID] + state[pid] = &ditypes.ProcessInfo{ + PID: pid, + BinaryPath: bin.binaryPath, + ServiceName: bin.serviceName, + + ProbesByID: make(map[ditypes.ProbeID]*ditypes.Probe), + InstrumentationUprobes: make(map[ditypes.ProbeID]*link.Link), + InstrumentationObjects: make(map[ditypes.ProbeID]*ebpf.Collection), + } + } + return state +} diff --git a/pkg/dynamicinstrumentation/proctracker/types.go b/pkg/dynamicinstrumentation/proctracker/types.go new file mode 100644 index 0000000000000..a377cbef780d8 --- /dev/null +++ b/pkg/dynamicinstrumentation/proctracker/types.go @@ -0,0 +1,43 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package proctracker + +import ( + "syscall" + + "github.com/DataDog/datadog-agent/pkg/network/protocols/http/gotls" +) + +type pid = uint32 + +type binaryID = gotls.TlsBinaryId + +type runningBinary struct { + // Inode number of the binary + binID binaryID + + // Modification time of the hooked binary, at the time of hooking. + mTime syscall.Timespec + + // Reference counter for the number of currently running processes for + // this binary. + processCount int32 + + // The location of the binary on the filesystem, as a string. + binaryPath string + + // The value of DD_SERVICE for the given binary. + // Associating a service name with a binary is not correct because + // we may have the same binary running with different service names + // on the same machine. However, for simplicity in the prototype we + // assume a 1:1 mapping. + serviceName string +} + +type binaries map[binaryID]*runningBinary +type processes map[pid]binaryID diff --git a/pkg/dynamicinstrumentation/ratelimiter/ratelimit.go b/pkg/dynamicinstrumentation/ratelimiter/ratelimit.go new file mode 100644 index 0000000000000..0283c526c5c05 --- /dev/null +++ b/pkg/dynamicinstrumentation/ratelimiter/ratelimit.go @@ -0,0 +1,92 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+
+//go:build linux_bpf
+
+// Package ratelimiter implements a simple rate limiter used for tracking and limiting
+// the rate of events being produced per probe
+package ratelimiter

+import (
+	"math"
+
+	"golang.org/x/time/rate"
+)
+
+// SingleRateLimiter wraps golang.org/x/time/rate to enforce a per-probe rate limit while
+// also tracking how many events were dropped and how many were allowed through.
+type SingleRateLimiter struct {
+	rate             float64
+	limiter          *rate.Limiter
+	droppedEvents    int64
+	successfulEvents int64
+}
+
+// MultiProbeRateLimiter is used for tracking and limiting the rate of events
+// being produced for multiple probes
+type MultiProbeRateLimiter struct {
+	defaultRate float64
+	x           map[string]*SingleRateLimiter
+}
+
+// NewMultiProbeRateLimiter creates a new MultiProbeRateLimiter
+func NewMultiProbeRateLimiter(defaultRatePerSecond float64) *MultiProbeRateLimiter {
+	return &MultiProbeRateLimiter{
+		defaultRate: defaultRatePerSecond,
+		x:           map[string]*SingleRateLimiter{},
+	}
+}
+
+// SetRate sets the rate for events with a specific ID. Specify mps=0 to
+// disable rate limiting.
+func (mr *MultiProbeRateLimiter) SetRate(id string, mps float64) {
+	mr.x[id] = NewSingleEventRateLimiter(mps)
+}
+
+// AllowOneEvent is called to determine if an event should be allowed according to
+// the configured rate limit. It returns whether the event is allowed, followed by
+// the probe's dropped and successful event counts.
+func (mr *MultiProbeRateLimiter) AllowOneEvent(id string) (bool, int64, int64) {
+	rateLimiter, ok := mr.x[id]
+	if !ok {
+		mr.SetRate(id, mr.defaultRate)
+		rateLimiter = mr.x[id]
+	}
+	return rateLimiter.AllowOneEvent(),
+		rateLimiter.droppedEvents, rateLimiter.successfulEvents
+}
+
+// NewSingleEventRateLimiter returns a rate limiter which restricts the number of single events sampled per second.
+// This defaults to infinite (allow-all) behaviour. The MaxPerSecond value of the rule may override the default.
+func NewSingleEventRateLimiter(mps float64) *SingleRateLimiter {
+	limit := math.MaxFloat64
+	if mps > 0 {
+		limit = mps
+	}
+	return &SingleRateLimiter{
+		rate:    mps,
+		limiter: rate.NewLimiter(rate.Limit(limit), int(math.Ceil(limit))),
+	}
+}
+
+// AllowOneEvent returns the rate limiter's decision on whether an event should be
+// processed. It also updates the limiter's dropped and successful event counters,
+// which callers use to report the effective rate of allowance.
+func (r *SingleRateLimiter) AllowOneEvent() bool {
+
+	if r.rate == 0 {
+		return true
+	}
+
+	var sampled = false
+	if r.limiter.Allow() {
+		sampled = true
+		r.successfulEvents++
+	} else {
+		r.droppedEvents++
+	}
+
+	return sampled
+}
diff --git a/pkg/dynamicinstrumentation/ratelimiter/ratelimit_test.go b/pkg/dynamicinstrumentation/ratelimiter/ratelimit_test.go
new file mode 100644
index 0000000000000..88cc21aa199a0
--- /dev/null
+++ b/pkg/dynamicinstrumentation/ratelimiter/ratelimit_test.go
@@ -0,0 +1,48 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
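For reference, a minimal usage sketch of the ratelimiter package above, in the way the ring-buffer consumer later in this patch is expected to use it; the probe IDs and the default rate here are illustrative, not taken from the change:

    package main

    import (
    	"fmt"

    	"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ratelimiter"
    )

    func main() {
    	// One SingleRateLimiter is kept per probe ID; probes that have not been
    	// configured explicitly get the default of 5 events per second.
    	rl := ratelimiter.NewMultiProbeRateLimiter(5.0)

    	// A rate of 0 disables limiting for that probe entirely.
    	rl.SetRate("config-probe", 0)

    	allowed, dropped, successful := rl.AllowOneEvent("example-probe")
    	fmt.Println(allowed, dropped, successful)
    }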
+ +//go:build linux_bpf + +package ratelimiter + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRateLimit(t *testing.T) { + + testCases := []struct { + name string + limitPerSecond float64 + }{ + { + name: "expected1", + limitPerSecond: 1.0, + }, + { + name: "expected2", + limitPerSecond: 5.0, + }, + } + + for _, testcase := range testCases { + + const timesToRun = 10000 + t.Run(testcase.name, func(t *testing.T) { + + r := NewSingleEventRateLimiter(testcase.limitPerSecond) + + for i := 0; i < timesToRun; i++ { + r.AllowOneEvent() + } + + assert.Equal(t, float64(timesToRun-float64(r.droppedEvents)), testcase.limitPerSecond) + assert.Equal(t, float64(r.droppedEvents), timesToRun-testcase.limitPerSecond) + assert.Equal(t, float64(r.successfulEvents), testcase.limitPerSecond) + }) + } +} diff --git a/pkg/dynamicinstrumentation/ringbufconsumer.go b/pkg/dynamicinstrumentation/ringbufconsumer.go new file mode 100644 index 0000000000000..ed6c574377ce5 --- /dev/null +++ b/pkg/dynamicinstrumentation/ringbufconsumer.go @@ -0,0 +1,64 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package dynamicinstrumentation + +import ( + "fmt" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/eventparser" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ratelimiter" + "github.com/cilium/ebpf/ringbuf" +) + +// startRingbufferConsumer opens the pinned bpf ringbuffer map +func (goDI *GoDI) startRingbufferConsumer() (func(), error) { + r, err := ringbuf.NewReader(ditypes.EventsRingbuffer) + if err != nil { + return nil, fmt.Errorf("couldn't set up reader for ringbuffer: %w", err) + } + + var ( + record ringbuf.Record + closed = false + ) + + closeFunc := func() { + closed = true + r.Close() + } + + // TODO: ensure rate limiters are removed once probes are removed + rateLimiters := ratelimiter.NewMultiProbeRateLimiter(1.0) + rateLimiters.SetRate(ditypes.ConfigBPFProbeID, 0) + + go func() { + for { + if closed { + break + } + err = r.ReadInto(&record) + if err != nil { + log.Infof("couldn't read event off ringbuffer: %s", err.Error()) + continue + } + + event := eventparser.ParseEvent(record.RawSample, rateLimiters) + if event == nil { + continue + } + goDI.stats.PIDEventsCreatedCount[event.PID]++ + goDI.stats.ProbeEventsCreatedCount[event.ProbeID]++ + goDI.processEvent(event) + } + }() + + return closeFunc, nil +} diff --git a/pkg/dynamicinstrumentation/uploader/di_log_converter.go b/pkg/dynamicinstrumentation/uploader/di_log_converter.go new file mode 100644 index 0000000000000..8f6d3063dc7bf --- /dev/null +++ b/pkg/dynamicinstrumentation/uploader/di_log_converter.go @@ -0,0 +1,159 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
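The converter below is the bridge between parsed ring-buffer events and the uploader defined later in this patch. A rough sketch of the wiring, assuming the process info and event come from the process tracker and event parser elsewhere in the change; the handleEvent helper itself is hypothetical:

    package example

    import (
    	"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes"
    	"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/uploader"
    )

    // handleEvent shows how a parsed event could be converted into a snapshot
    // and queued for upload.
    func handleEvent(procInfo *ditypes.ProcessInfo, event *ditypes.DIEvent, logUploader uploader.LogUploader) {
    	// NewDILog returns nil when the process or probe is unknown.
    	if snapshot := uploader.NewDILog(procInfo, event); snapshot != nil {
    		logUploader.Enqueue(snapshot)
    	}
    }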
+ +//go:build linux_bpf + +package uploader + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + + "github.com/google/uuid" +) + +// NewDILog creates a new snapshot upload based on the event and relevant process +func NewDILog(procInfo *ditypes.ProcessInfo, event *ditypes.DIEvent) *ditypes.SnapshotUpload { + if procInfo == nil { + log.Infof("Process with pid %d not found, ignoring event", event.PID) + return nil + } + probe := procInfo.GetProbe(event.ProbeID) + if probe == nil { + log.Info("Probe ID not found, ignoring event", event.ProbeID) + return nil + } + + snapshotID, _ := uuid.NewUUID() + argDefs := getFunctionArguments(procInfo, probe) + var captures ditypes.Captures + if probe.InstrumentationInfo.InstrumentationOptions.CaptureParameters { + captures = convertCaptures(argDefs, event.Argdata) + } else { + captures = reportCaptureError(argDefs) + } + + capturesJSON, _ := json.Marshal(captures) + stackTrace, err := parseStackTrace(procInfo, event.StackPCs) + if err != nil { + log.Infof("event from pid/probe %d/%s does not include stack trace: %s\n", event.PID, event.ProbeID, err) + } + return &ditypes.SnapshotUpload{ + Service: probe.ServiceName, + Message: fmt.Sprintf("%s %s", probe.FuncName, capturesJSON), + DDSource: "dd_debugger", + DDTags: "", + Debugger: struct { + ditypes.Snapshot `json:"snapshot"` + }{ + Snapshot: ditypes.Snapshot{ + ID: &snapshotID, + Timestamp: time.Now().UnixNano() / int64(time.Millisecond), + Language: "go", + ProbeInSnapshot: convertProbe(probe), + Captures: captures, + Stack: stackTrace, + }, + }, + Duration: 0, + } +} + +func convertProbe(probe *ditypes.Probe) ditypes.ProbeInSnapshot { + module, function := parseFuncName(probe.FuncName) + return ditypes.ProbeInSnapshot{ + ID: getProbeUUID(probe.ID), + ProbeLocation: ditypes.ProbeLocation{ + Method: function, + Type: module, + }, + } +} + +func convertCaptures(defs []ditypes.Parameter, captures []*ditypes.Param) ditypes.Captures { + return ditypes.Captures{ + Entry: &ditypes.Capture{ + Arguments: convertArgs(defs, captures), + }, + } +} + +func reportCaptureError(defs []ditypes.Parameter) ditypes.Captures { + args := make(map[string]*ditypes.CapturedValue) + for _, def := range defs { + args[def.Name] = &ditypes.CapturedValue{ + Type: def.Type, + NotCapturedReason: "Failed to instrument, type is unsupported or too complex", + } + } + return ditypes.Captures{ + Entry: &ditypes.Capture{ + Arguments: args, + }, + } +} + +func convertArgs(defs []ditypes.Parameter, captures []*ditypes.Param) map[string]*ditypes.CapturedValue { + args := make(map[string]*ditypes.CapturedValue) + for idx, capture := range captures { + var argName string + if idx < len(defs) { + argName = defs[idx].Name + } else { + argName = fmt.Sprintf("arg_%d", idx) + } + + if capture == nil { + continue + } + + cv := &ditypes.CapturedValue{Type: capture.Type} + if capture.ValueStr != "" || capture.Type == "string" { + // we make a copy of the string so the pointer isn't overwritten in the loop + valueCopy := capture.ValueStr + cv.Value = &valueCopy + } + if capture.Fields != nil && idx < len(defs) { + cv.Fields = convertArgs(defs[idx].ParameterPieces, capture.Fields) + } + args[argName] = cv + } + return args +} + +func parseFuncName(funcName string) (string, string) { + parts := strings.Split(funcName, ".") + if len(parts) == 2 { + return parts[0], parts[1] + } + return "", funcName +} + +func 
getFunctionArguments(proc *ditypes.ProcessInfo, probe *ditypes.Probe) []ditypes.Parameter { + return proc.TypeMap.Functions[probe.FuncName] +} + +func getProbeUUID(probeID string) string { + // the RC config ID format is datadog///_/ + // if we fail to parse it, we just return the original probeID string + parts := strings.Split(probeID, "/") + if len(parts) != 5 { + return probeID + } + idPart := parts[len(parts)-2] + parts = strings.Split(idPart, "_") + if len(parts) != 2 { + return probeID + } + // we could also validate that the extracted string is a valid UUID, + // but it's not necessary since we tolerate IDs that don't parse + return parts[1] +} diff --git a/pkg/dynamicinstrumentation/uploader/offline.go b/pkg/dynamicinstrumentation/uploader/offline.go new file mode 100644 index 0000000000000..a1d19375ebf47 --- /dev/null +++ b/pkg/dynamicinstrumentation/uploader/offline.go @@ -0,0 +1,83 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package uploader + +import ( + "encoding/json" + "os" + "sync" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diagnostics" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" +) + +// OfflineSerializer is used for serializing events and printing instead of +// uploading to the DataDog backend +type OfflineSerializer[T any] struct { + outputFile *os.File + mu sync.Mutex +} + +// NewOfflineLogSerializer creates an offline serializer for serializing events and printing instead of +// uploading to the DataDog backend +func NewOfflineLogSerializer(outputPath string) (*OfflineSerializer[ditypes.SnapshotUpload], error) { + if outputPath == "" { + panic("No snapshot output path set") + } + return NewOfflineSerializer[ditypes.SnapshotUpload](outputPath) +} + +// NewOfflineDiagnosticSerializer creates an offline serializer for serializing diagnostic information +// and printing instead of uploading to the DataDog backend +func NewOfflineDiagnosticSerializer(dm *diagnostics.DiagnosticManager, outputPath string) (*OfflineSerializer[ditypes.DiagnosticUpload], error) { + if outputPath == "" { + panic("No diagnostic output path set") + } + ds, err := NewOfflineSerializer[ditypes.DiagnosticUpload](outputPath) + if err != nil { + return nil, err + } + go func() { + for diagnostic := range dm.Updates { + ds.Enqueue(diagnostic) + } + }() + return ds, nil +} + +// NewOfflineSerializer is the generic create method for offline serialization +// of events or diagnostic output +func NewOfflineSerializer[T any](outputPath string) (*OfflineSerializer[T], error) { + file, err := os.OpenFile(outputPath, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644) + if err != nil { + return nil, err + } + u := &OfflineSerializer[T]{ + outputFile: file, + } + return u, nil +} + +// Enqueue writes data to the offline serializer +func (s *OfflineSerializer[T]) Enqueue(item *T) bool { + s.mu.Lock() + defer s.mu.Unlock() + bs, err := json.Marshal(item) + if err != nil { + log.Info("Failed to marshal item", item) + return false + } + + _, err = s.outputFile.WriteString(string(bs) + "\n") + if err != nil { + log.Error(err) + } + return true +} diff --git a/pkg/dynamicinstrumentation/uploader/stack_trace.go b/pkg/dynamicinstrumentation/uploader/stack_trace.go new file mode 100644 index 
0000000000000..f428e2c40e0d2 --- /dev/null +++ b/pkg/dynamicinstrumentation/uploader/stack_trace.go @@ -0,0 +1,151 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package uploader + +import ( + "cmp" + "debug/dwarf" + "errors" + "fmt" + "slices" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" +) + +// parseStackTrace parses a raw byte array into 10 uint64 program counters +// which then get resolved into strings representing lines of a stack trace +func parseStackTrace(procInfo *ditypes.ProcessInfo, rawProgramCounters []uint64) ([]ditypes.StackFrame, error) { + stackTrace := make([]ditypes.StackFrame, 0) + if procInfo == nil { + return stackTrace, errors.New("nil process info") + } + + for i := range rawProgramCounters { + if rawProgramCounters[i] == 0 { + break + } + + entries, ok := procInfo.TypeMap.InlinedFunctions[rawProgramCounters[i]] + if ok { + for n := range entries { + inlinedFuncInfo, err := pcToLine(procInfo, rawProgramCounters[i]) + if err != nil { + return stackTrace, fmt.Errorf("could not resolve pc to inlined function info: %w", err) + } + + symName, lineNumber, err := parseInlinedEntry(procInfo.DwarfData.Reader(), entries[n]) + if err != nil { + return stackTrace, fmt.Errorf("could not get inlined entries: %w", err) + } + stackFrame := ditypes.StackFrame{Function: fmt.Sprintf("%s [inlined in %s]", symName, inlinedFuncInfo.fn), FileName: inlinedFuncInfo.file, Line: int(lineNumber)} + stackTrace = append(stackTrace, stackFrame) + } + } + + funcInfo, err := pcToLine(procInfo, rawProgramCounters[i]) + if err != nil { + return stackTrace, fmt.Errorf("could not resolve pc to function info: %w", err) + } + stackFrame := ditypes.StackFrame{Function: funcInfo.fn, FileName: funcInfo.file, Line: int(funcInfo.line)} + stackTrace = append(stackTrace, stackFrame) + + if funcInfo.fn == "main.main" { + break + } + } + return stackTrace, nil +} + +type funcInfo struct { + file string + line int64 + fn string +} + +func pcToLine(procInfo *ditypes.ProcessInfo, pc uint64) (*funcInfo, error) { + + var ( + file string + line int64 + fn string + ) + + typeMap := procInfo.TypeMap + + functionIndex, _ := slices.BinarySearchFunc(typeMap.FunctionsByPC, &ditypes.LowPCEntry{LowPC: pc}, func(a, b *ditypes.LowPCEntry) int { + return cmp.Compare(b.LowPC, a.LowPC) + }) + + var fileNumber int64 + + if functionIndex >= len(typeMap.FunctionsByPC) { + return nil, fmt.Errorf("invalid function index") + } + funcEntry := typeMap.FunctionsByPC[functionIndex].Entry + for _, field := range funcEntry.Field { + if field.Attr == dwarf.AttrName { + fn = field.Val.(string) + } + if field.Attr == dwarf.AttrDeclFile { + fileNumber = field.Val.(int64) + } + if field.Attr == dwarf.AttrDeclLine { + line = field.Val.(int64) + } + } + + compileUnitIndex, _ := slices.BinarySearchFunc(typeMap.DeclaredFiles, &ditypes.LowPCEntry{LowPC: pc}, func(a, b *ditypes.LowPCEntry) int { + return cmp.Compare(b.LowPC, a.LowPC) + }) + + compileUnitEntry := typeMap.DeclaredFiles[compileUnitIndex].Entry + + cuLineReader, err := procInfo.DwarfData.LineReader(compileUnitEntry) + if err != nil { + return nil, fmt.Errorf("could not get file line reader for compile unit: %w", err) + } + files := cuLineReader.Files() + if len(files) < int(fileNumber) { + return nil, fmt.Errorf("invalid file number 
in dwarf function entry associated with compile unit") + } + + file = files[fileNumber].Name + + return &funcInfo{ + file: file, + line: line, + fn: fn, + }, nil +} + +func parseInlinedEntry(reader *dwarf.Reader, e *dwarf.Entry) (name string, line int64, err error) { + + var offset dwarf.Offset + + for i := range e.Field { + if e.Field[i].Attr == dwarf.AttrAbstractOrigin { + offset = e.Field[i].Val.(dwarf.Offset) + reader.Seek(offset) + entry, err := reader.Next() + if err != nil { + return "", -1, fmt.Errorf("could not read inlined function origin: %w", err) + } + for j := range entry.Field { + if entry.Field[j].Attr == dwarf.AttrName { + name = entry.Field[j].Val.(string) + } + } + } + + if e.Field[i].Attr == dwarf.AttrCallLine { + line = e.Field[i].Val.(int64) + } + } + + return name, line, nil +} diff --git a/pkg/dynamicinstrumentation/uploader/uploader.go b/pkg/dynamicinstrumentation/uploader/uploader.go new file mode 100644 index 0000000000000..f14fa8233e0a4 --- /dev/null +++ b/pkg/dynamicinstrumentation/uploader/uploader.go @@ -0,0 +1,221 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +// Package uploader provides functionality for uploading events and diagnostic +// information to the DataDog backend +package uploader + +import ( + "bytes" + "encoding/json" + "fmt" + "mime/multipart" + "net/http" + "net/textproto" + "os" + "time" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diagnostics" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" +) + +// LogUploader is the interface for uploading Dynamic Instrumentation logs +type LogUploader interface { + Enqueue(item *ditypes.SnapshotUpload) bool +} + +// DiagnosticUploader is the interface for uploading Dynamic Instrumentation +// diagnostic information +type DiagnosticUploader interface { + Enqueue(item *ditypes.DiagnosticUpload) bool +} + +// Uploader is a generic form of uploader functionality +type Uploader[T any] struct { + buffer chan *T + client *http.Client + + batchSize int + uploadMode UploadMode +} + +// UploadMode reflects the kind of data that is being uploaded +type UploadMode bool + +const ( + // UploadModeDiagnostic means the data being uploaded is diagnostic information + UploadModeDiagnostic UploadMode = true + //UploadModeLog means the data being uploaded is logs + UploadModeLog UploadMode = false +) + +func startDiagnosticUploader(dm *diagnostics.DiagnosticManager) *Uploader[ditypes.DiagnosticUpload] { + u := NewUploader[ditypes.DiagnosticUpload](UploadModeDiagnostic) + go func() { + for diagnostic := range dm.Updates { + u.Enqueue(diagnostic) + } + }() + return u +} + +// NewLogUploader creates a new log uploader +func NewLogUploader() *Uploader[ditypes.SnapshotUpload] { + return NewUploader[ditypes.SnapshotUpload](UploadModeLog) +} + +// NewDiagnosticUploader creates a new diagnostic uploader +func NewDiagnosticUploader() *Uploader[ditypes.DiagnosticUpload] { + return startDiagnosticUploader(diagnostics.Diagnostics) +} + +// NewUploader creates a new uploader of a specified generic type +func NewUploader[T any](mode UploadMode) *Uploader[T] { + u := &Uploader[T]{ + buffer: make(chan *T, 100), + client: &http.Client{}, + + batchSize: 100, + uploadMode: mode, + } + go u.processBuffer() + return u +} 
+ +// Enqueue enqueues data to be uploaded. It's return value reflects whether +// or not the upload queue was full +func (u *Uploader[T]) Enqueue(item *T) bool { + select { + case u.buffer <- item: + return true + default: + log.Infof("Uploader buffer full, dropping message %+v", item) + return false + } +} + +func (u *Uploader[T]) processBuffer() { + flushTimer := time.NewTicker(1 * time.Second) + defer flushTimer.Stop() + + batch := make([]*T, 0, 5) + + for { + select { + case item := <-u.buffer: + batch = append(batch, item) + if len(batch) >= u.batchSize { + batchCopy := make([]*T, len(batch)) + copy(batchCopy, batch) + go u.uploadBatch(batchCopy) + batch = batch[:0] + flushTimer.Reset(1 * time.Second) + } + case <-flushTimer.C: + if len(batch) > 0 { + batchCopy := make([]*T, len(batch)) + copy(batchCopy, batch) + go u.uploadBatch(batchCopy) + batch = batch[:0] + } + flushTimer.Reset(1 * time.Second) + } + } +} + +func (u *Uploader[T]) uploadBatch(batch []*T) { + switch u.uploadMode { + case UploadModeDiagnostic: + u.uploadDiagnosticBatch(batch) + case UploadModeLog: + u.uploadLogBatch(batch) + } +} + +// there's no need to do endpoint discovery, we can just hardcode the URLs +// it's guaranteed that if datadog-agent has Go DI it will also have the proxy upload endpoints + +func (u *Uploader[T]) uploadLogBatch(batch []*T) { + // TODO: find out if there are more efficient ways of sending logs to the backend + // this is the way all other DI runtimes upload data + url := fmt.Sprintf("http://%s:8126/debugger/v1/input", getAgentHost()) + body, _ := json.Marshal(batch) + req, err := http.NewRequest("POST", url, bytes.NewReader(body)) + if err != nil { + log.Info("Failed to build request", err) + return + } + req.Header.Set("Content-Type", "application/json") + + resp, err := u.client.Do(req) + if err != nil { + log.Info("Error uploading log batch", err) + return + } + defer resp.Body.Close() + log.Info("HTTP", resp.StatusCode, url) +} + +func (u *Uploader[T]) uploadDiagnosticBatch(batch []*T) { + url := fmt.Sprintf("http://%s:8126/debugger/v1/diagnostics", getAgentHost()) + + // Create a buffer to hold the multipart form data + var b bytes.Buffer + w := multipart.NewWriter(&b) + + diagnosticJSON, err := json.Marshal(batch) + if err != nil { + log.Info("Failed to marshal diagnostic batch", err, batch) + return + } + + header := make(textproto.MIMEHeader) + header.Set("Content-Disposition", `form-data; name="event"; filename="event.json"`) + header.Set("Content-Type", "application/json") + fw, err := w.CreatePart(header) + if err != nil { + log.Info("Failed to create form file", err) + return + } + + // Write the JSON data to the form-data part + if _, err = fw.Write(diagnosticJSON); err != nil { + log.Info("Failed to write data to form file", err) + return + } + + // Close the multipart writer, otherwise the request will be missing the terminating boundary. 
+ w.Close() + + // Create a new request + req, err := http.NewRequest("POST", url, &b) + if err != nil { + log.Info("Failed to build request", err) + return + } + + // Set the content type to multipart/form-data and include the boundary + req.Header.Set("Content-Type", w.FormDataContentType()) + resp, err := u.client.Do(req) + if err != nil { + log.Info("Error uploading diagnostic batch", err) + return + } + defer resp.Body.Close() + + log.Info("HTTP", resp.StatusCode, url) +} + +func getAgentHost() string { + ddAgentHost := os.Getenv("DD_AGENT_HOST") + if ddAgentHost == "" { + ddAgentHost = "localhost" + } + return ddAgentHost +} diff --git a/pkg/dynamicinstrumentation/util/file_watcher.go b/pkg/dynamicinstrumentation/util/file_watcher.go new file mode 100644 index 0000000000000..ea05116ea4b24 --- /dev/null +++ b/pkg/dynamicinstrumentation/util/file_watcher.go @@ -0,0 +1,61 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package util + +import ( + "os" + "time" + + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// FileWatcher is used to track updates to a particular filepath +type FileWatcher struct { + filePath string +} + +// NewFileWatcher creates a FileWatcher to track updates to a specified file +func NewFileWatcher(filePath string) *FileWatcher { + return &FileWatcher{filePath: filePath} +} + +func (fw *FileWatcher) readFile() ([]byte, error) { + content, err := os.ReadFile(fw.filePath) + if err != nil { + return nil, err + } + return content, nil +} + +// Watch watches the target file for changes and returns a channel that will receive +// the file's content whenever it changes. +// The initial implementation used fsnotify, but this was losing update events when running +// e2e tests - this simpler implementation behaves as expected, even if it's less efficient. +// Since this is meant to be used only for testing and development, it's fine to keep this +// implementation. +func (fw *FileWatcher) Watch() (<-chan []byte, error) { + updateChan := make(chan []byte) + prevContent := []byte{} + ticker := time.NewTicker(100 * time.Millisecond) + go func() { + defer close(updateChan) + for range ticker.C { + content, err := fw.readFile() + if err != nil { + log.Infof("Error reading file %s: %s", fw.filePath, err) + return + } + if len(content) > 0 && string(content) != string(prevContent) { + prevContent = content + updateChan <- content + } + } + }() + + return updateChan, nil +} diff --git a/pkg/dynamicinstrumentation/util/file_watcher_test.go b/pkg/dynamicinstrumentation/util/file_watcher_test.go new file mode 100644 index 0000000000000..894395b9f61c0 --- /dev/null +++ b/pkg/dynamicinstrumentation/util/file_watcher_test.go @@ -0,0 +1,110 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
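A minimal consumer of the polling FileWatcher above; the path is a placeholder, and each value received on the channel is the full contents of the changed file:

    package main

    import (
    	"fmt"

    	"github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/util"
    )

    func main() {
    	fw := util.NewFileWatcher("/tmp/di-probes.json") // placeholder path
    	updates, err := fw.Watch()
    	if err != nil {
    		panic(err)
    	}
    	// The channel is closed if the file becomes unreadable.
    	for content := range updates {
    		fmt.Printf("probe configuration changed: %d bytes\n", len(content))
    	}
    }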
+ +//go:build linux_bpf + +package util + +import ( + "io/fs" + "os" + "path/filepath" + "testing" + "time" + + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/stretchr/testify/assert" +) + +func TestFileWatcherMultipleFiles(t *testing.T) { + // create two temporary files + f1, _ := os.CreateTemp("", "file-watcher-test-") + f2, _ := os.CreateTemp("", "file-watcher-test-") + defer f1.Close() + defer f2.Close() + defer os.Remove(f1.Name()) + defer os.Remove(f2.Name()) + + // get the absolute path for both files + fp1, _ := filepath.Abs(f1.Name()) + fp2, _ := filepath.Abs(f2.Name()) + + // initialize file contents + os.WriteFile(fp1, []byte("This is file 1"), fs.ModeAppend) + os.WriteFile(fp2, []byte("This is file 2"), fs.ModeAppend) + + // initialize file watchers + fw1 := NewFileWatcher(fp1) + fw2 := NewFileWatcher(fp2) + + ch1, err := fw1.Watch() + assert.NoError(t, err) + ch2, err := fw2.Watch() + assert.NoError(t, err) + + fc1 := <-ch1 + assert.Equal(t, "This is file 1", string(fc1)) + fc2 := <-ch2 + assert.Equal(t, "This is file 2", string(fc2)) + + os.WriteFile(fp1, []byte("Updated file 1"), fs.ModeAppend) + os.WriteFile(fp2, []byte("Updated file 2"), fs.ModeAppend) + + fc1 = <-ch1 + assert.Equal(t, "Updated file 1", string(fc1)) + fc2 = <-ch2 + assert.Equal(t, "Updated file 2", string(fc2)) +} + +func TestFileWatcherDeletedFile(t *testing.T) { + timeout := time.After(1 * time.Second) + done := make(chan bool) + go func() { + f, _ := os.CreateTemp("", "file-watcher-delete-test-") + defer f.Close() + defer os.Remove(f.Name()) + + fp, _ := filepath.Abs(f.Name()) + os.WriteFile(fp, []byte("Initial"), fs.ModeAppend) + + info, err := os.Stat(f.Name()) + if err != nil { + panic(err) + } + m := info.Mode() + + fw := NewFileWatcher(fp) + ch, err := fw.Watch() + assert.NoError(t, err) + + fc := <-ch + assert.Equal(t, "Initial", string(fc)) + + // delete file and check that we are still receiving updates + os.Remove(f.Name()) + os.WriteFile(fp, []byte("Updated"), fs.ModeAppend) + err = os.Chmod(fp, m) + assert.NoError(t, err) + + info, err = os.Stat(f.Name()) + if err != nil { + panic(err) + } + m = info.Mode() + log.Info(m) + + fc, ok := <-ch + assert.True(t, ok, "expected channel to be open") + assert.Equal(t, "Updated", string(fc), "expected to receive new file contents on channel") + done <- true + }() + + select { + case <-timeout: + t.Fatal("Timeout exceeded") + case <-done: + } +} diff --git a/pkg/ebpf/bytecode/runtime/.gitignore b/pkg/ebpf/bytecode/runtime/.gitignore index a4383358ec72f..9b4fc67872634 100644 --- a/pkg/ebpf/bytecode/runtime/.gitignore +++ b/pkg/ebpf/bytecode/runtime/.gitignore @@ -1 +1,14 @@ *.d + +# runtime compilation asset integrity files +conntrack.go +dynamicinstrumentation.go +http.go +logdebug-test.go +offsetguess-test.go +oom-kill.go +runtime-security.go +shared-libraries.go +tcp-queue-length.go +tracer.go +usm.go diff --git a/pkg/ebpf/bytecode/runtime/asset.go b/pkg/ebpf/bytecode/runtime/asset.go index 2d0812368b5b2..0c0add2da5702 100644 --- a/pkg/ebpf/bytecode/runtime/asset.go +++ b/pkg/ebpf/bytecode/runtime/asset.go @@ -8,7 +8,9 @@ package runtime import ( + "bytes" "crypto/sha256" + "encoding/hex" "fmt" "io" "os" @@ -37,34 +39,59 @@ func newAsset(filename, hash string) *asset { } } +// CompileOptions are options used to compile eBPF programs at runtime +type CompileOptions struct { + // AdditionalFlags are extra flags passed to clang + AdditionalFlags []string + // ModifyCallback is a callback function that is allowed to modify the contents 
before compilation + ModifyCallback func(in io.Reader, out io.Writer) error + // StatsdClient is a statsd client to use for telemetry + StatsdClient statsd.ClientInterface + // UseKernelHeaders enables the inclusion of kernel headers from the host + UseKernelHeaders bool +} + // Compile compiles the asset to an object file, writes it to the configured output directory, and // then opens and returns the compiled output func (a *asset) Compile(config *ebpf.Config, additionalFlags []string, client statsd.ClientInterface) (CompiledOutput, error) { + return a.compile(config, CompileOptions{AdditionalFlags: additionalFlags, StatsdClient: client, UseKernelHeaders: true}) +} + +// CompileWithOptions is the same as Compile, but takes an options struct with additional choices. +func (a *asset) CompileWithOptions(config *ebpf.Config, opts CompileOptions) (CompiledOutput, error) { + return a.compile(config, opts) +} + +func (a *asset) compile(config *ebpf.Config, opts CompileOptions) (CompiledOutput, error) { log.Debugf("starting runtime compilation of %s", a.filename) start := time.Now() a.tm.compilationEnabled = true defer func() { a.tm.compilationDuration = time.Since(start) - if client != nil { - a.tm.SubmitTelemetry(a.filename, client) + if opts.StatsdClient != nil { + a.tm.SubmitTelemetry(a.filename, opts.StatsdClient) } }() - opts := kernel.HeaderOptions{ - DownloadEnabled: config.EnableKernelHeaderDownload, - Dirs: config.KernelHeadersDirs, - DownloadDir: config.KernelHeadersDownloadDir, - AptConfigDir: config.AptConfigDir, - YumReposDir: config.YumReposDir, - ZypperReposDir: config.ZypperReposDir, - } - kernelHeaders := kernel.GetKernelHeaders(opts, client) - if len(kernelHeaders) == 0 { - a.tm.compilationResult = headerFetchErr - return nil, fmt.Errorf("unable to find kernel headers") + var kernelHeaders []string + if opts.UseKernelHeaders { + headerOpts := kernel.HeaderOptions{ + DownloadEnabled: config.EnableKernelHeaderDownload, + Dirs: config.KernelHeadersDirs, + DownloadDir: config.KernelHeadersDownloadDir, + AptConfigDir: config.AptConfigDir, + YumReposDir: config.YumReposDir, + ZypperReposDir: config.ZypperReposDir, + } + kernelHeaders = kernel.GetKernelHeaders(headerOpts, opts.StatsdClient) + if len(kernelHeaders) == 0 { + a.tm.compilationResult = headerFetchErr + return nil, fmt.Errorf("unable to find kernel headers") + } } + a.tm.compilationResult = verificationError outputDir := config.RuntimeCompilerOutputDir p := filepath.Join(config.BPFDir, "runtime", a.filename) @@ -78,22 +105,61 @@ func (a *asset) Compile(config *ebpf.Config, additionalFlags []string, client st return nil, fmt.Errorf("unable to create compiler output directory %s: %w", outputDir, err) } - protectedFile, err := createProtectedFile(fmt.Sprintf("%s-%s", a.filename, a.hash), outputDir, f) + diskProtectedFile, err := createProtectedFile(fmt.Sprintf("%s-%s", a.filename, a.hash), outputDir, f) if err != nil { return nil, fmt.Errorf("failed to create ram backed file from %s: %w", f.Name(), err) } defer func() { - if err := protectedFile.Close(); err != nil { - log.Debugf("error closing protected file %s: %s", protectedFile.Name(), err) + if err := diskProtectedFile.Close(); err != nil { + log.Debugf("error closing protected file %s: %s", diskProtectedFile.Name(), err) } }() + protectedFile := diskProtectedFile + hash := a.hash - if err = a.verify(protectedFile); err != nil { - a.tm.compilationResult = verificationError + if err = a.verify(diskProtectedFile); err != nil { return nil, fmt.Errorf("error reading input 
file: %s", err) } - out, result, err := compileToObjectFile(protectedFile.Name(), outputDir, a.filename, a.hash, additionalFlags, kernelHeaders) + a.tm.compilationResult = compilationErr + if opts.ModifyCallback != nil { + outBuf := &bytes.Buffer{} + // seek to the start and read all of protected file contents + if _, err := diskProtectedFile.Seek(0, io.SeekStart); err != nil { + return nil, fmt.Errorf("seek disk protected file: %w", err) + } + + // run modify callback + if err := opts.ModifyCallback(diskProtectedFile, outBuf); err != nil { + return nil, fmt.Errorf("modify callback: %w", err) + } + outReader := bytes.NewReader(outBuf.Bytes()) + + // update hash + hash, err = sha256Reader(outReader) + if err != nil { + return nil, fmt.Errorf("hash post-modification protected file: %w", err) + } + if _, err := outReader.Seek(0, io.SeekStart); err != nil { + return nil, fmt.Errorf("seek post-modification contents: %w", err) + } + + // create new protected file with the post-modification contents + postModifyProtectedFile, err := createProtectedFile(fmt.Sprintf("%s-%s", a.filename, hash), outputDir, outReader) + if err != nil { + return nil, fmt.Errorf("create post-modification protected file: %w", err) + } + defer func() { + if err := postModifyProtectedFile.Close(); err != nil { + log.Debugf("close post-modification protected file %s: %s", postModifyProtectedFile.Name(), err) + } + }() + + // set compilation to use post-modification contents + protectedFile = postModifyProtectedFile + } + + out, result, err := compileToObjectFile(protectedFile.Name(), outputDir, a.filename, hash, opts.AdditionalFlags, kernelHeaders) a.tm.compilationResult = result return out, err @@ -111,17 +177,24 @@ func createProtectedFile(name, runtimeDir string, source io.Reader) (ProtectedFi // verify reads the asset from the reader and verifies the content hash matches what is expected. 
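The CompileOptions/ModifyCallback hook introduced above lets a caller rewrite the runtime-compiled C source before clang runs, presumably so dynamic instrumentation can inject generated probe code. A sketch of a callback and an options value; the define, the extra flag, and the surrounding package are illustrative, and how the runtime asset itself is obtained is omitted because it is not part of this hunk:

    package example

    import (
    	"io"

    	"github.com/DataDog/datadog-agent/pkg/ebpf/bytecode/runtime"
    )

    // injectDefines is a hypothetical ModifyCallback that prepends a compile-time
    // define to the source before handing it to the compiler.
    func injectDefines(in io.Reader, out io.Writer) error {
    	if _, err := io.WriteString(out, "#define DI_MAX_PROBES 16\n"); err != nil {
    		return err
    	}
    	_, err := io.Copy(out, in)
    	return err
    }

    var opts = runtime.CompileOptions{
    	AdditionalFlags:  []string{"-g"}, // illustrative extra clang flag
    	ModifyCallback:   injectDefines,  // rewrites the source before compilation
    	UseKernelHeaders: true,           // resolve and include host kernel headers
    }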
func (a *asset) verify(source ProtectedFile) error { - h := sha256.New() - if _, err := io.Copy(h, source.Reader()); err != nil { - return fmt.Errorf("error hashing file %s: %w", source.Name(), err) + sum, err := sha256Reader(source) + if err != nil { + return fmt.Errorf("hash file %s: %w", source.Name(), err) } - if fmt.Sprintf("%x", h.Sum(nil)) != a.hash { + if sum != a.hash { return fmt.Errorf("file content hash does not match expected value") } - return nil } +func sha256Reader(r io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, r); err != nil { + return "", err + } + return hex.EncodeToString(h.Sum(nil)), nil +} + // GetTelemetry returns the compilation telemetry for this asset func (a *asset) GetTelemetry() CompilationTelemetry { return a.tm diff --git a/pkg/ebpf/bytecode/runtime/protected_file.go b/pkg/ebpf/bytecode/runtime/protected_file.go index a59224f09cf19..4e25867cc815b 100644 --- a/pkg/ebpf/bytecode/runtime/protected_file.go +++ b/pkg/ebpf/bytecode/runtime/protected_file.go @@ -20,8 +20,7 @@ import ( // ProtectedFile represents a symlink to a sealed ram-backed file type ProtectedFile interface { - Close() error - Reader() io.Reader + io.ReadSeekCloser Name() string } @@ -104,6 +103,10 @@ func (m *ramBackedFile) Name() string { return m.symlink } -func (m *ramBackedFile) Reader() io.Reader { - return m.file +func (m *ramBackedFile) Seek(offset int64, whence int) (int64, error) { + return m.file.Seek(offset, whence) +} + +func (m *ramBackedFile) Read(p []byte) (n int, err error) { + return m.file.Read(p) } diff --git a/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go b/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go index 9394d168d971a..4cd657d96d26f 100644 --- a/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go +++ b/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go @@ -76,7 +76,7 @@ func compileToObjectFile(inFile, outputDir, filename, inHash string, additionalF } // RHEL platforms back-ported the __BPF_FUNC_MAPPER macro, so we can always use the dynamic method there - if kv >= kernel.VersionCode(4, 10, 0) || family == "rhel" { + if len(kernelHeaders) > 0 && (kv >= kernel.VersionCode(4, 10, 0) || family == "rhel") { var helperPath string helperPath, err = includeHelperAvailability(kernelHeaders) if err != nil { diff --git a/pkg/ebpf/cgo/genpost.go b/pkg/ebpf/cgo/genpost.go index 65c357c2e6623..97035a29bea46 100644 --- a/pkg/ebpf/cgo/genpost.go +++ b/pkg/ebpf/cgo/genpost.go @@ -13,6 +13,7 @@ import ( "os" "regexp" "runtime" + "strings" ) func main() { @@ -23,11 +24,30 @@ func main() { b = removeAbsolutePath(b, runtime.GOOS) + int8variableNames := []string{ + "Buf", + "Cgroup", + "Cgroup_name", + "LocalAddr", + "LocalAddress", + "Probe_id", + "RemoteAddr", + "RemoteAddress", + "Request_fragment", + "Topic_name", + "Trigger_comm", + "Victim_comm", + } + // Convert []int8 to []byte in multiple generated fields from the kernel, to simplify // conversion to string; see golang.org/issue/20753 - convertInt8ArrayToByteArrayRegex := regexp.MustCompile(`(Request_fragment|Topic_name|Buf|Cgroup|RemoteAddr|LocalAddr|Cgroup_name|Fcomm|Tcomm)(\s+)\[(\d+)\]u?int8`) + convertInt8ArrayToByteArrayRegex := regexp.MustCompile(`(` + strings.Join(int8variableNames, "|") + `)(\s+)\[(\d+)\]u?int8`) b = convertInt8ArrayToByteArrayRegex.ReplaceAll(b, []byte("$1$2[$3]byte")) + // Convert generated pointers to CGo structs to uint64 + convertPointerToUint64Regex := regexp.MustCompile(`\*_Ctype_struct_(\w+)`) + b = 
convertPointerToUint64Regex.ReplaceAll(b, []byte("uint64")) + b, err = format.Source(b) if err != nil { log.Fatal(err) diff --git a/pkg/ebpf/compiler/compiler.go b/pkg/ebpf/compiler/compiler.go index a26c90c57c4e2..1553ef5d42ac4 100644 --- a/pkg/ebpf/compiler/compiler.go +++ b/pkg/ebpf/compiler/compiler.go @@ -66,10 +66,6 @@ func kernelHeaderPaths(headerDirs []string) []string { // CompileToObjectFile compiles an eBPF program func CompileToObjectFile(inFile, outputFile string, cflags []string, headerDirs []string) error { - if len(headerDirs) == 0 { - return fmt.Errorf("unable to find kernel headers") - } - tmpIncludeDir, err := writeStdarg() if err != nil { return err diff --git a/pkg/ebpf/config.go b/pkg/ebpf/config.go index c400e69da049e..3bcea9ba21de2 100644 --- a/pkg/ebpf/config.go +++ b/pkg/ebpf/config.go @@ -9,7 +9,7 @@ import ( "strings" sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" - aconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kernel" ) @@ -75,6 +75,10 @@ type Config struct { // AttachKprobesWithKprobeEventsABI uses the kprobe_events ABI to attach kprobes rather than the newer perf ABI. AttachKprobesWithKprobeEventsABI bool + + // BypassEnabled is used in tests only. + // It enables a ebpf-manager feature to bypass programs on-demand for controlled visibility. + BypassEnabled bool } func key(pieces ...string) string { @@ -83,7 +87,7 @@ func key(pieces ...string) string { // NewConfig creates a config with ebpf-related settings func NewConfig() *Config { - cfg := aconfig.SystemProbe() + cfg := pkgconfigsetup.SystemProbe() sysconfig.Adjust(cfg) c := &Config{ diff --git a/pkg/ebpf/map_cleaner.go b/pkg/ebpf/map_cleaner.go index 311d294929c46..6c0493731b4f9 100644 --- a/pkg/ebpf/map_cleaner.go +++ b/pkg/ebpf/map_cleaner.go @@ -71,7 +71,7 @@ func (mc *MapCleaner[K, V]) Clean(interval time.Duration, preClean func() bool, // of a version comparison because some distros have backported this API), and fallback to // the old method otherwise. The new API is also more efficient because it minimizes the number of allocations. 
cleaner := mc.cleanWithoutBatches - if maps.BatchAPISupported() { + if mc.emap.CanUseBatchAPI() { cleaner = mc.cleanWithBatches } @@ -135,6 +135,10 @@ func (mc *MapCleaner[K, V]) cleanWithBatches(nowTS int64, shouldClean func(nowTS keysToDelete = append(keysToDelete, key) } + if err := it.Err(); err != nil { + log.Errorf("error iterating map=%s: %s", mc.emap, err) + } + var deletionError error if len(keysToDelete) > 0 { deletedCount, deletionError = mc.emap.BatchDelete(keysToDelete) @@ -179,6 +183,10 @@ func (mc *MapCleaner[K, V]) cleanWithoutBatches(nowTS int64, shouldClean func(no keysToDelete = append(keysToDelete, key) } + if err := entries.Err(); err != nil { + log.Errorf("error iterating map=%s: %s", mc.emap, err) + } + for _, k := range keysToDelete { err := mc.emap.Delete(&k) if err == nil { diff --git a/pkg/ebpf/maps/generic_map.go b/pkg/ebpf/maps/generic_map.go index 0e762c48cf00c..a8fbdb4c06665 100644 --- a/pkg/ebpf/maps/generic_map.go +++ b/pkg/ebpf/maps/generic_map.go @@ -9,6 +9,8 @@ package maps import ( + "bytes" + "encoding/binary" "errors" "fmt" "reflect" @@ -23,6 +25,8 @@ import ( const defaultBatchSize = 100 +var ErrBatchAPINotSupported = errors.New("batch API not supported for this map: check whether key is fixed-size, kernel supports batch API and if this map is not per-cpu") + // BatchAPISupported returns true if the kernel supports the batch API for maps var BatchAPISupported = funcs.MemoizeNoError(func() bool { // Do feature detection directly instead of based on kernel versions for more accuracy @@ -53,7 +57,18 @@ var BatchAPISupported = funcs.MemoizeNoError(func() bool { // GenericMap is a wrapper around ebpf.Map that allows to use generic types. // Also includes support for batch iterations type GenericMap[K any, V any] struct { - m *ebpf.Map + m *ebpf.Map + keySupportsBatchAPI bool +} + +func canBinaryReadKey[K any]() bool { + kval := new(K) + buffer := make([]byte, unsafe.Sizeof(*kval)) + reader := bytes.NewReader(buffer) + + err := binary.Read(reader, binary.LittleEndian, kval) + + return err == nil } // NewGenericMap creates a new GenericMap with the given spec. Key and Value sizes are automatically @@ -78,13 +93,21 @@ func NewGenericMap[K any, V any](spec *ebpf.MapSpec) (*GenericMap[K, V], error) spec.ValueSize = uint32(unsafe.Sizeof(vval)) } + // See if we can perform binary.Read on the key type. If we can't we can't use the batch API + // for this map + keySupportsBatchAPI := canBinaryReadKey[K]() + if !keySupportsBatchAPI { + log.Warnf("Key type %T does not support binary.Read, batch API will not be used for this map", kval) + } + m, err := ebpf.NewMap(spec) if err != nil { return nil, err } return &GenericMap[K, V]{ - m: m, + m: m, + keySupportsBatchAPI: keySupportsBatchAPI, }, nil } @@ -169,11 +192,19 @@ func (g *GenericMap[K, V]) Delete(key *K) error { // BatchDelete deletes a batch of keys from the map. Returns the number of deleted items func (g *GenericMap[K, V]) BatchDelete(keys []K) (int, error) { + if !g.CanUseBatchAPI() { + return 0, ErrBatchAPINotSupported + } + return g.m.BatchDelete(keys, nil) } // BatchUpdate updates a batch of keys in the map func (g *GenericMap[K, V]) BatchUpdate(keys []K, values []V, opts *ebpf.BatchOptions) (int, error) { + if !g.CanUseBatchAPI() { + return 0, ErrBatchAPINotSupported + } + return g.m.BatchUpdate(keys, values, opts) } @@ -186,6 +217,12 @@ type GenericMapIterator[K any, V any] interface { Err() error } +// CanUseBatchAPI returns whether this map can use the batch API. 
Takes into account map type, batch API support +// in the kernel and the key type (keys with pointers). Returns an error describing the reason. +func (g *GenericMap[K, V]) CanUseBatchAPI() bool { + return g.keySupportsBatchAPI && BatchAPISupported() && !g.isPerCPU() +} + func isPerCPU(t ebpf.MapType) bool { switch t { case ebpf.PerCPUHash, ebpf.PerCPUArray, ebpf.LRUCPUHash: @@ -221,7 +258,7 @@ func (g *GenericMap[K, V]) IterateWithBatchSize(batchSize int) GenericMapIterato batchSize = int(g.m.MaxEntries()) } - if BatchAPISupported() && !g.isPerCPU() && batchSize > 1 { + if batchSize > 1 && g.CanUseBatchAPI() { it := &genericMapBatchIterator[K, V]{ m: g.m, batchSize: batchSize, diff --git a/pkg/ebpf/maps/generic_map_test.go b/pkg/ebpf/maps/generic_map_test.go index 261571b440a73..3bc59f494f8b6 100644 --- a/pkg/ebpf/maps/generic_map_test.go +++ b/pkg/ebpf/maps/generic_map_test.go @@ -531,3 +531,44 @@ func TestBatchUpdate(t *testing.T) { require.True(t, foundElements[i]) } } + +type keyWithPointer struct { + Pointer *uint32 + Value uint32 +} + +func TestIterateWithPointerKey(t *testing.T) { + require.NoError(t, rlimit.RemoveMemlock()) + + m, err := NewGenericMap[keyWithPointer, uint32](&ebpf.MapSpec{ + Type: ebpf.Hash, + MaxEntries: 100, + }) + require.NoError(t, err) + + numsToPut := uint32(50) + theNumber := uint32(42) + expectedNumbers := make([]uint32, numsToPut) + for i := uint32(0); i < numsToPut; i++ { + require.NoError(t, m.Put(&keyWithPointer{Pointer: &theNumber, Value: i}, &i)) + expectedNumbers[i] = i + } + + var k keyWithPointer + var v uint32 + actualNumbers := make([]uint32, numsToPut) + + // Should automatically revert to the single item iterator, as we cannot use pointers + // in batch iterators + it := m.IterateWithBatchSize(10) + require.NotNil(t, it) + for it.Next(&k, &v) { + actualNumbers[k.Value] = v + require.Equal(t, theNumber, *k.Pointer) + require.Equal(t, &theNumber, k.Pointer) + require.Equal(t, k.Value, v) + } + + require.NoError(t, it.Err()) + require.Equal(t, expectedNumbers, actualNumbers) +} diff --git a/pkg/ebpf/telemetry/errors_telemetry_test.go b/pkg/ebpf/telemetry/errors_telemetry_test.go index ceb412e3e1fc6..c03290b80a543 100644 --- a/pkg/ebpf/telemetry/errors_telemetry_test.go +++ b/pkg/ebpf/telemetry/errors_telemetry_test.go @@ -15,7 +15,7 @@ import ( "golang.org/x/sys/unix" sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" - aconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" manager "github.com/DataDog/ebpf-manager" @@ -48,7 +48,7 @@ type config struct { } func testConfig() *config { - cfg := aconfig.SystemProbe() + cfg := pkgconfigsetup.SystemProbe() sysconfig.Adjust(cfg) return &config{ diff --git a/pkg/ebpf/testdata/c/uprobe_attacher-test.c b/pkg/ebpf/testdata/c/uprobe_attacher-test.c new file mode 100644 index 0000000000000..bcc755a245786 --- /dev/null +++ b/pkg/ebpf/testdata/c/uprobe_attacher-test.c @@ -0,0 +1,20 @@ +// This program is used to test the UprobeAttacher object, it defines two simple probes that attach +// to userspace functions. 
+#include "kconfig.h" +#include "ktypes.h" +#include "bpf_metadata.h" +#include +#include "bpf_tracing.h" +#include "bpf_helpers.h" +#include "bpf_helpers_custom.h" +#include + +SEC("uprobe/SSL_connect") +int uprobe__SSL_connect(struct pt_regs *ctx) { + return 0; +} + +SEC("uprobe/main") +int uprobe__main(struct pt_regs *ctx) { + return 0; +} diff --git a/pkg/ebpf/uprobes/attacher.go b/pkg/ebpf/uprobes/attacher.go new file mode 100644 index 0000000000000..ae7c254f58dc7 --- /dev/null +++ b/pkg/ebpf/uprobes/attacher.go @@ -0,0 +1,945 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux_bpf + +package uprobes + +import ( + "bufio" + "errors" + "fmt" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "time" + + manager "github.com/DataDog/ebpf-manager" + "github.com/hashicorp/go-multierror" + "golang.org/x/exp/maps" + + "github.com/DataDog/datadog-agent/pkg/ebpf" + "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" + "github.com/DataDog/datadog-agent/pkg/network/usm/sharedlibraries" + "github.com/DataDog/datadog-agent/pkg/network/usm/utils" + "github.com/DataDog/datadog-agent/pkg/process/monitor" + "github.com/DataDog/datadog-agent/pkg/util/kernel" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// ExcludeMode defines the different optiont to exclude processes from attachment +type ExcludeMode uint8 + +const ( + // ExcludeSelf excludes the agent's own PID + ExcludeSelf ExcludeMode = 1 << iota + // ExcludeInternal excludes internal DataDog processes + ExcludeInternal + // ExcludeBuildkit excludes buildkitd processes + ExcludeBuildkit + // ExcludeContainerdTmp excludes containerd tmp mounts + ExcludeContainerdTmp +) + +var ( + // ErrSelfExcluded is returned when the PID is the same as the agent's PID. + ErrSelfExcluded = errors.New("self-excluded") + // ErrInternalDDogProcessRejected is returned when the PID is an internal datadog process. + ErrInternalDDogProcessRejected = errors.New("internal datadog process rejected") + // ErrNoMatchingRule is returned when no rule matches the shared library path. + ErrNoMatchingRule = errors.New("no matching rule") + // regex that defines internal DataDog processes + internalProcessRegex = regexp.MustCompile("datadog-agent/.*/((process|security|trace)-agent|system-probe|agent)") +) + +// AttachTarget defines the target to which we should attach the probes, libraries or executables +type AttachTarget uint8 + +const ( + // AttachToExecutable attaches to the main executable + AttachToExecutable AttachTarget = 1 << iota + // AttachToSharedLibraries attaches to shared libraries + AttachToSharedLibraries +) + +// ProbeOptions is a structure that holds the options for a probe attachment. By default +// these values will be inferred from the probe name, but the user can override them if needed. +type ProbeOptions struct { + // IsManualReturn indicates that the probe is a manual return probe, which means that the inspector + // will find the return locations of the function and attach to them instead of using uretprobes. + IsManualReturn bool + + // Symbol is the symbol name to attach the probe to. This is useful when the symbol name is not a valid + // C identifier (e.g. Go functions) + Symbol string +} + +// AttachRule defines how to attach a certain set of probes. 
Uprobes can be attached +// to shared libraries or executables, this structure tells the attacher which ones to +// select and to which targets to do it. +type AttachRule struct { + // LibraryNameRegex defines which libraries should be matched by this rule + LibraryNameRegex *regexp.Regexp + // ExecutableFilter is a function that receives the path of the executable and returns true if it should be matched + ExecutableFilter func(string, *ProcInfo) bool + // Targets defines the targets to which we should attach the probes, shared libraries and/or executables + Targets AttachTarget + // ProbesSelectors defines which probes should be attached and how should we validate + // the attachment (e.g., whether we need all probes active or just one of them, or in a best-effort basis) + ProbesSelector []manager.ProbesSelector + // ProbeOptionsOverride allows the user to override the options for a probe that are inferred from the name + // of the probe. This way the user can set options such as manual return detection or symbol names for probes + // whose names aren't valid C identifiers. + ProbeOptionsOverride map[string]ProbeOptions +} + +// canTarget returns true if the rule matches the given AttachTarget +func (r *AttachRule) canTarget(target AttachTarget) bool { + return r.Targets&target != 0 +} + +func (r *AttachRule) matchesLibrary(path string) bool { + return r.canTarget(AttachToSharedLibraries) && r.LibraryNameRegex != nil && r.LibraryNameRegex.MatchString(path) +} + +func (r *AttachRule) matchesExecutable(path string, procInfo *ProcInfo) bool { + return r.canTarget(AttachToExecutable) && (r.ExecutableFilter == nil || r.ExecutableFilter(path, procInfo)) +} + +// getProbeOptions returns the options for a given probe, checking if we have specific overrides +// in this rule and, if not, using the options inferred from the probe name. 
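+// For example, absent an override, a probe named uprobe__SSL_connect__return is inferred to target
+// the symbol "SSL_connect" with manual return attachment (see parseSymbolFromEBPFProbeName).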
+func (r *AttachRule) getProbeOptions(probeID manager.ProbeIdentificationPair) (ProbeOptions, error) { + if r.ProbeOptionsOverride != nil { + if options, ok := r.ProbeOptionsOverride[probeID.EBPFFuncName]; ok { + return options, nil + } + } + + symbol, isManualReturn, err := parseSymbolFromEBPFProbeName(probeID.EBPFFuncName) + if err != nil { + return ProbeOptions{}, err + } + + return ProbeOptions{ + Symbol: symbol, + IsManualReturn: isManualReturn, + }, nil +} + +// Validate checks whether the rule is valid, returns nil if it is, an error message otherwise +func (r *AttachRule) Validate() error { + var result error + + if r.Targets == 0 { + result = multierror.Append(result, errors.New("no targets specified")) + } + + if r.canTarget(AttachToSharedLibraries) && r.LibraryNameRegex == nil { + result = multierror.Append(result, errors.New("no library name regex specified")) + } + + for _, selector := range r.ProbesSelector { + for _, probeID := range selector.GetProbesIdentificationPairList() { + _, err := r.getProbeOptions(probeID) + if err != nil { + result = multierror.Append(result, fmt.Errorf("cannot get options for probe %s: %w", probeID.EBPFFuncName, err)) + } + } + } + + return result +} + +// AttacherConfig defines the configuration for the attacher +type AttacherConfig struct { + // Rules defines a series of rules that tell the attacher how to attach the probes + Rules []*AttachRule + + // ScanProcessesInterval defines the interval at which we scan for terminated processes and new processes we haven't seen + ScanProcessesInterval time.Duration + + // EnablePeriodicScanNewProcesses defines whether the attacher should scan for new processes periodically (with ScanProcessesInterval) + EnablePeriodicScanNewProcesses bool + + // ProcRoot is the root directory of the proc filesystem + ProcRoot string + + // ExcludeTargets defines the targets that should be excluded from the attacher + ExcludeTargets ExcludeMode + + // EbpfConfig is the configuration for the eBPF program + EbpfConfig *ebpf.Config + + // PerformInitialScan defines if the attacher should perform an initial scan of the processes before starting the monitor + PerformInitialScan bool + + // ProcessMonitorEventStream defines whether the process monitor is using the event stream + ProcessMonitorEventStream bool + + // EnableDetailedLogging makes the attacher log why it's attaching or not attaching to a process + // This is useful for debugging purposes, do not enable in production. 
+ EnableDetailedLogging bool +} + +// SetDefaults configures the AttacherConfig with default values for those fields for which the compiler +// defaults are not enough +func (ac *AttacherConfig) SetDefaults() { + if ac.ScanProcessesInterval == 0 { + ac.ScanProcessesInterval = 30 * time.Second + } + + if ac.ProcRoot == "" { + ac.ProcRoot = kernel.HostProc() + } + + if ac.EbpfConfig == nil { + ac.EbpfConfig = ebpf.NewConfig() + } +} + +// Validate checks whether the configuration is valid, returns nil if it is, an error message otherwise +func (ac *AttacherConfig) Validate() error { + var errs []string + + if ac.EbpfConfig == nil { + errs = append(errs, "missing ebpf config") + } + + if ac.ProcRoot == "" { + errs = append(errs, "missing proc root") + } + + for _, rule := range ac.Rules { + err := rule.Validate() + if err != nil { + errs = append(errs, err.Error()) + } + } + + if len(errs) == 0 { + return nil + } + + return errors.New("invalid attacher configuration: " + strings.Join(errs, ", ")) +} + +// ProbeManager is an interface that defines the methods that a Manager implements, +// so that we can replace it in tests for a mock object +type ProbeManager interface { + // AddHook adds a hook to the manager with the given UID and probe + AddHook(UID string, probe *manager.Probe) error + + // DetachHook detaches the hook with the ID pair + DetachHook(manager.ProbeIdentificationPair) error + + // GetProbe returns the probe with the given ID pair, and a boolean indicating if it was found + GetProbe(manager.ProbeIdentificationPair) (*manager.Probe, bool) +} + +// FileRegistry is an interface that defines the methods that a FileRegistry implements, so that we can replace it in tests for a mock object +type FileRegistry interface { + // Register registers a file path to be tracked by the attacher for the given PID. The registry will call the activationCB when the file is opened + // the first time, and the deactivationCB when the file is closed. If the file is already registered, the alreadyRegistered callback + // will be called instead of the activationCB. + Register(namespacedPath string, pid uint32, activationCB, deactivationCB, alreadyRegistered utils.Callback) error + + // Unregister unregisters a file path from the attacher. The deactivation callback will be called for all + // files that were registered with the given PID and aren't used anymore. + Unregister(uint32) error + + // Clear clears the registry, removing all registered files + Clear() + + // GetRegisteredProcesses returns a map of all the processes that are currently registered in the registry + GetRegisteredProcesses() map[uint32]struct{} +} + +// AttachCallback is a callback that is called whenever a probe is attached successfully +type AttachCallback func(*manager.Probe, *utils.FilePath) + +// UprobeAttacher is a struct that handles the attachment of uprobes to processes and libraries +type UprobeAttacher struct { + // name contains the name of this attacher for identification + name string + + // done is a channel to signal the attacher to stop + done chan struct{} + + // wg is a wait group to wait for the attacher to stop + wg sync.WaitGroup + + // config holds the configuration of the attacher. 
Not a pointer as we want
+    // a copy of the configuration so that the user cannot change it, as we have
+    // certain cached values that we have no way to invalidate if the config
+    // changes after the attacher is created
+    config AttacherConfig
+
+    // fileRegistry is used to keep track of the files we are attached to, and attach only once to each file
+    fileRegistry FileRegistry
+
+    // manager is used to manage the eBPF probes (attach/detach to processes)
+    manager ProbeManager
+
+    // inspector is used to extract the metadata from the binaries
+    inspector BinaryInspector
+
+    // pathToAttachedProbes maps a filesystem path to the probes attached to it.
+    // Used to detach them once the path is no longer used.
+    pathToAttachedProbes map[string][]manager.ProbeIdentificationPair
+
+    // onAttachCallback is a callback that is called whenever a probe is attached
+    onAttachCallback AttachCallback
+
+    // soWatcher is the program that launches events whenever shared libraries are
+    // opened
+    soWatcher *sharedlibraries.EbpfProgram
+
+    // handlesLibrariesCached is a cache for the handlesLibraries function, avoiding
+    // recomputation every time
+    handlesLibrariesCached *bool
+
+    // handlesExecutablesCached is a cache for the handlesExecutables function, avoiding
+    // recomputation every time
+    handlesExecutablesCached *bool
+}
+
+// NewUprobeAttacher creates a new UprobeAttacher. Receives as arguments the
+// name of the attacher, the configuration, the probe manager (ebpf.Manager
+// usually), a callback to be called whenever a probe is attached (optional, can
+// be nil), and the binary inspector to be used (e.g., while we usually want
+// NativeBinaryInspector here, we might want the GoBinaryInspector to attach to
+// Go functions in a different way).
+// Note that the config is copied, not referenced. The attacher caches some values
+// that depend on the configuration, so any changes to the configuration after the
+// attacher is created would make those caches incoherent. This way we ensure that
+// the attacher is always consistent with the configuration it was created with.
+func NewUprobeAttacher(name string, config AttacherConfig, mgr ProbeManager, onAttachCallback AttachCallback, inspector BinaryInspector) (*UprobeAttacher, error) {
+    config.SetDefaults()
+
+    if err := config.Validate(); err != nil {
+        return nil, fmt.Errorf("invalid attacher configuration: %w", err)
+    }
+
+    ua := &UprobeAttacher{
+        name:                 name,
+        config:               config,
+        fileRegistry:         utils.NewFileRegistry(name),
+        manager:              mgr,
+        onAttachCallback:     onAttachCallback,
+        pathToAttachedProbes: make(map[string][]manager.ProbeIdentificationPair),
+        done:                 make(chan struct{}),
+        inspector:            inspector,
+    }
+
+    utils.AddAttacher(name, ua)
+
+    return ua, nil
+}
+
+// handlesLibraries returns whether the attacher has rules configured to attach to shared libraries.
+// It caches the result to avoid recalculating it every time we are attaching to a PID.
+func (ua *UprobeAttacher) handlesLibraries() bool {
+    if ua.handlesLibrariesCached != nil {
+        return *ua.handlesLibrariesCached
+    }
+
+    result := false
+    for _, rule := range ua.config.Rules {
+        if rule.canTarget(AttachToSharedLibraries) {
+            result = true
+            break
+        }
+    }
+    ua.handlesLibrariesCached = &result
+    return result
+}
+
+// handlesExecutables returns whether the attacher has rules configured to attach to executables directly.
+// It caches the result to avoid recalculating it every time we are attaching to a PID.
+func (ua *UprobeAttacher) handlesExecutables() bool { + if ua.handlesExecutablesCached != nil { + return *ua.handlesExecutablesCached + } + + result := false + for _, rule := range ua.config.Rules { + if rule.canTarget(AttachToExecutable) { + result = true + break + } + } + ua.handlesExecutablesCached = &result + return result +} + +// Start starts the attacher, attaching to the processes and libraries as needed +func (ua *UprobeAttacher) Start() error { + var cleanupExec, cleanupExit func() + procMonitor := monitor.GetProcessMonitor() + err := procMonitor.Initialize(ua.config.ProcessMonitorEventStream) + if err != nil { + return fmt.Errorf("error initializing process monitor: %w", err) + } + + if ua.handlesExecutables() { + cleanupExec = procMonitor.SubscribeExec(ua.handleProcessStart) + } + // We always want to track process deletions, to avoid memory leaks + cleanupExit = procMonitor.SubscribeExit(ua.handleProcessExit) + + if ua.handlesLibraries() { + if !sharedlibraries.IsSupported(ua.config.EbpfConfig) { + return errors.New("shared libraries tracing not supported for this platform") + } + + ua.soWatcher = sharedlibraries.NewEBPFProgram(ua.config.EbpfConfig) + + err := ua.soWatcher.Init() + if err != nil { + return fmt.Errorf("error initializing shared library program: %w", err) + } + err = ua.soWatcher.Start() + if err != nil { + return fmt.Errorf("error starting shared library program: %w", err) + } + } + + if ua.config.PerformInitialScan { + // Initial scan only looks at existing processes, and as it's the first scan + // we don't have to track deletions + err := ua.Sync(true, false) + if err != nil { + return fmt.Errorf("error during initial scan: %w", err) + } + } + + ua.wg.Add(1) + go func() { + processSync := time.NewTicker(ua.config.ScanProcessesInterval) + + defer func() { + processSync.Stop() + if cleanupExec != nil { + cleanupExec() + } + cleanupExit() + procMonitor.Stop() + ua.fileRegistry.Clear() + if ua.soWatcher != nil { + ua.soWatcher.Stop() + } + ua.wg.Done() + log.Infof("uprobe attacher %s stopped", ua.name) + }() + + var sharedLibDataChan <-chan ebpf.DataEvent + var sharedLibLostChan <-chan uint64 + + if ua.soWatcher != nil { + sharedLibDataChan = ua.soWatcher.GetPerfHandler().DataChannel() + sharedLibLostChan = ua.soWatcher.GetPerfHandler().LostChannel() + } + + for { + select { + case <-ua.done: + return + case <-processSync.C: + // We always track process deletions in the scan, to avoid memory leaks. 
+ _ = ua.Sync(ua.config.EnablePeriodicScanNewProcesses, true) + case event, ok := <-sharedLibDataChan: + if !ok { + return + } + _ = ua.handleLibraryOpen(&event) + case <-sharedLibLostChan: + // Nothing to do in this case + break + } + } + }() + log.Infof("uprobe attacher %s started", ua.name) + + return nil +} + +// Sync scans the proc filesystem for new processes and detaches from terminated ones +func (ua *UprobeAttacher) Sync(trackCreations, trackDeletions bool) error { + if !trackDeletions && !trackCreations { + return nil // Nothing to do + } + + var deletionCandidates map[uint32]struct{} + if trackDeletions { + deletionCandidates = ua.fileRegistry.GetRegisteredProcesses() + } + thisPID, err := kernel.RootNSPID() + if err != nil { + return err + } + + _ = kernel.WithAllProcs(ua.config.ProcRoot, func(pid int) error { + if pid == thisPID { // don't scan ourselves + return nil + } + + if trackDeletions { + if _, ok := deletionCandidates[uint32(pid)]; ok { + // We have previously hooked into this process and it remains active, + // so we remove it from the deletionCandidates list, and move on to the next PID + delete(deletionCandidates, uint32(pid)) + return nil + } + } + + if trackCreations { + // This is a new PID so we attempt to attach SSL probes to it + _ = ua.AttachPID(uint32(pid)) + } + return nil + }) + + if trackDeletions { + // At this point all entries from deletionCandidates are no longer alive, so + // we should detach our SSL probes from them + for pid := range deletionCandidates { + ua.handleProcessExit(pid) + } + } + + return nil +} + +// Stop stops the attacher +func (ua *UprobeAttacher) Stop() { + close(ua.done) + ua.wg.Wait() +} + +// handleProcessStart is called when a new process is started, wraps AttachPIDWithOptions but ignoring the error +// for API compatibility with processMonitor +func (ua *UprobeAttacher) handleProcessStart(pid uint32) { + _ = ua.AttachPIDWithOptions(pid, false) // Do not try to attach to libraries on process start, it hasn't loaded them yet +} + +// handleProcessExit is called when a process finishes, wraps DetachPID but ignoring the error +// for API compatibility with processMonitor +func (ua *UprobeAttacher) handleProcessExit(pid uint32) { + _ = ua.DetachPID(pid) +} + +func (ua *UprobeAttacher) handleLibraryOpen(event *ebpf.DataEvent) error { + defer event.Done() + + libpath := sharedlibraries.ToLibPath(event.Data) + path := sharedlibraries.ToBytes(&libpath) + + return ua.AttachLibrary(string(path), libpath.Pid) +} + +func (ua *UprobeAttacher) buildRegisterCallbacks(matchingRules []*AttachRule, procInfo *ProcInfo) (func(utils.FilePath) error, func(utils.FilePath) error) { + registerCB := func(p utils.FilePath) error { + err := ua.attachToBinary(p, matchingRules, procInfo) + if ua.config.EnableDetailedLogging { + log.Debugf("uprobes: attaching to %s (PID %d): err=%v", p.HostPath, procInfo.PID, err) + } + return err + } + unregisterCB := func(p utils.FilePath) error { + err := ua.detachFromBinary(p) + if ua.config.EnableDetailedLogging { + log.Debugf("uprobes: detaching from %s (PID %d): err=%v", p.HostPath, p.PID, err) + } + return err + } + + return registerCB, unregisterCB +} + +// AttachLibrary attaches the probes to the given library, opened by a given PID +func (ua *UprobeAttacher) AttachLibrary(path string, pid uint32) error { + if (ua.config.ExcludeTargets&ExcludeSelf) != 0 && int(pid) == os.Getpid() { + return ErrSelfExcluded + } + + matchingRules := ua.getRulesForLibrary(path) + if len(matchingRules) == 0 { + return 
ErrNoMatchingRule + } + + registerCB, unregisterCB := ua.buildRegisterCallbacks(matchingRules, NewProcInfo(ua.config.ProcRoot, pid)) + + return ua.fileRegistry.Register(path, pid, registerCB, unregisterCB, utils.IgnoreCB) +} + +// getRulesForLibrary returns the rules that match the given library path +func (ua *UprobeAttacher) getRulesForLibrary(path string) []*AttachRule { + var matchedRules []*AttachRule + + for _, rule := range ua.config.Rules { + if rule.matchesLibrary(path) { + matchedRules = append(matchedRules, rule) + } + } + return matchedRules +} + +// getRulesForExecutable returns the rules that match the given executable +func (ua *UprobeAttacher) getRulesForExecutable(path string, procInfo *ProcInfo) []*AttachRule { + var matchedRules []*AttachRule + + for _, rule := range ua.config.Rules { + if rule.matchesExecutable(path, procInfo) { + matchedRules = append(matchedRules, rule) + } + } + return matchedRules +} + +var errIterationStart = errors.New("iteration start") + +// getExecutablePath resolves the executable of the given PID looking in procfs. Automatically +// handles delays in procfs updates. Will return an error if the path cannot be resolved +func (ua *UprobeAttacher) getExecutablePath(pid uint32) (string, error) { + pidAsStr := strconv.FormatUint(uint64(pid), 10) + exePath := filepath.Join(ua.config.ProcRoot, pidAsStr, "exe") + + var binPath string + err := errIterationStart + end := time.Now().Add(procFSUpdateTimeout) + + for err != nil && end.After(time.Now()) { + binPath, err = os.Readlink(exePath) + if err != nil { + time.Sleep(time.Millisecond) + } + } + + if err != nil { + return "", err + } + + return binPath, nil +} + +const optionAttachToLibs = true + +// AttachPID attaches the corresponding probes to a given pid +func (ua *UprobeAttacher) AttachPID(pid uint32) error { + return ua.AttachPIDWithOptions(pid, optionAttachToLibs) +} + +// AttachPIDWithOptions attaches the corresponding probes to a given pid +func (ua *UprobeAttacher) AttachPIDWithOptions(pid uint32, attachToLibs bool) error { + if (ua.config.ExcludeTargets&ExcludeSelf) != 0 && int(pid) == os.Getpid() { + return ErrSelfExcluded + } + + procInfo := NewProcInfo(ua.config.ProcRoot, pid) + + // Only compute the binary path if we are going to need it. 
It's better to do these two checks
+    // (which are cheap, as the handlesExecutables result is cached) than to do the syscall
+    // every time
+    var binPath string
+    var err error
+    if ua.handlesExecutables() || (ua.config.ExcludeTargets&ExcludeInternal) != 0 {
+        binPath, err = procInfo.Exe()
+        if err != nil {
+            return err
+        }
+    }
+
+    if (ua.config.ExcludeTargets&ExcludeInternal) != 0 && internalProcessRegex.MatchString(binPath) {
+        return ErrInternalDDogProcessRejected
+    }
+
+    if ua.handlesExecutables() {
+        matchingRules := ua.getRulesForExecutable(binPath, procInfo)
+
+        if len(matchingRules) != 0 {
+            registerCB, unregisterCB := ua.buildRegisterCallbacks(matchingRules, procInfo)
+            err = ua.fileRegistry.Register(binPath, pid, registerCB, unregisterCB, utils.IgnoreCB)
+            if err != nil {
+                return err
+            }
+        }
+    }
+
+    if attachToLibs && ua.handlesLibraries() {
+        return ua.attachToLibrariesOfPID(pid)
+    }
+
+    return nil
+}
+
+// DetachPID detaches the uprobes attached to a PID
+func (ua *UprobeAttacher) DetachPID(pid uint32) error {
+    return ua.fileRegistry.Unregister(pid)
+}
+
+const buildKitProcessName = "buildkitd"
+
+func isBuildKit(procInfo *ProcInfo) bool {
+    comm, err := procInfo.Comm()
+    if err != nil {
+        return false
+    }
+    return strings.HasPrefix(comm, buildKitProcessName)
+}
+
+func isContainerdTmpMount(path string) bool {
+    return strings.Contains(path, "tmpmounts/containerd-mount")
+}
+
+// getUID() returns a key of length 5 as the kernel uprobe registration path is limited to a length of 64
+// ebpf-manager/utils.go:GenerateEventName() MaxEventNameLen = 64
+// MAX_EVENT_NAME_LEN (linux/kernel/trace/trace.h)
+//
+// Length 5 is an arbitrary value, as the full string of the eventName format is
+//
+//	fmt.Sprintf("%s_%.*s_%s_%s", probeType, maxFuncNameLen, functionName, UID, attachPIDstr)
+//
+// functionName is variable but with a minimum guarantee of 10 chars
+func getUID(lib utils.PathIdentifier) string {
+    return lib.Key()[:5]
+}
+
+func parseSymbolFromEBPFProbeName(probeName string) (symbol string, isManualReturn bool, err error) {
+    parts := strings.Split(probeName, "__")
+    if len(parts) < 2 {
+        err = fmt.Errorf("invalid probe name %s, no double underscore (__) separating probe type and function name", probeName)
+        return
+    }
+
+    symbol = parts[1]
+    if len(parts) > 2 {
+        if parts[2] == "return" {
+            isManualReturn = true
+        } else {
+            err = fmt.Errorf("invalid probe name %s, unexpected third part %s. Format should be probeType__funcName[__return]", probeName, parts[2])
+            return
+        }
+    }
+
+    return
+}
+
+// attachToBinary attaches the probes to the given binary. Important: it does not perform any cleanup on failure.
+// This is to match the behavior of the FileRegistry, which will call the deactivation callback on failure of the registration
+// callback.
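+// The registry's deactivation callback (detachFromBinary, wired up in buildRegisterCallbacks) is
+// therefore what cleans up any probes that were attached before the failure.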
+func (ua *UprobeAttacher) attachToBinary(fpath utils.FilePath, matchingRules []*AttachRule, procInfo *ProcInfo) error { + if ua.config.ExcludeTargets&ExcludeBuildkit != 0 && isBuildKit(procInfo) { + return fmt.Errorf("process %d is buildkitd, skipping", fpath.PID) + } else if ua.config.ExcludeTargets&ExcludeContainerdTmp != 0 && isContainerdTmpMount(fpath.HostPath) { + return fmt.Errorf("path %s from process %d is tempmount of containerd, skipping", fpath.HostPath, fpath.PID) + } + + symbolsToRequest, err := ua.computeSymbolsToRequest(matchingRules) + if err != nil { + return fmt.Errorf("error computing symbols to request for rules %+v: %w", matchingRules, err) + } + + inspectResult, isAttachable, err := ua.inspector.Inspect(fpath, symbolsToRequest) + if err != nil { + return fmt.Errorf("error inspecting %s: %w", fpath.HostPath, err) + } + if !isAttachable { + return fmt.Errorf("incompatible binary %s", fpath.HostPath) + } + + uid := getUID(fpath.ID) + + for _, rule := range matchingRules { + for _, selector := range rule.ProbesSelector { + err = ua.attachProbeSelector(selector, fpath, uid, rule, inspectResult) + if err != nil { + return err + } + } + } + + return nil +} + +func (ua *UprobeAttacher) attachProbeSelector(selector manager.ProbesSelector, fpath utils.FilePath, fpathUID string, rule *AttachRule, inspectResult map[string]bininspect.FunctionMetadata) error { + _, isBestEffort := selector.(*manager.BestEffort) + + for _, probeID := range selector.GetProbesIdentificationPairList() { + probeOpts, err := rule.getProbeOptions(probeID) + if err != nil { + return fmt.Errorf("error parsing probe name %s: %w", probeID.EBPFFuncName, err) + } + + data, found := inspectResult[probeOpts.Symbol] + if !found { + if isBestEffort { + return nil + } + // This should not happen, as Inspect should have already + // returned an error if mandatory symbols weren't found. + // However and for safety, we'll check again and return an + // error if the symbol is not found. 
+ return fmt.Errorf("symbol %s not found in %s", probeOpts.Symbol, fpath.HostPath) + } + + var locationsToAttach []uint64 + var probeTypeCode string // to make unique UIDs between return/non-return probes + if probeOpts.IsManualReturn { + locationsToAttach = data.ReturnLocations + probeTypeCode = "r" + } else { + locationsToAttach = []uint64{data.EntryLocation} + probeTypeCode = "d" + } + + for i, location := range locationsToAttach { + newProbeID := manager.ProbeIdentificationPair{ + EBPFFuncName: probeID.EBPFFuncName, + UID: fmt.Sprintf("%s%s%d", fpathUID, probeTypeCode, i), // Make UID unique even if we have multiple locations + } + + probe, found := ua.manager.GetProbe(newProbeID) + if found { + // We have already probed this process, just ensure it's running and skip it + if !probe.IsRunning() { + err := probe.Attach() + if err != nil { + return fmt.Errorf("cannot attach running probe %v: %w", newProbeID, err) + } + } + if ua.config.EnableDetailedLogging { + log.Debugf("Probe %v already attached to %s", newProbeID, fpath.HostPath) + } + continue + } + + newProbe := &manager.Probe{ + ProbeIdentificationPair: newProbeID, + BinaryPath: fpath.HostPath, + UprobeOffset: location, + HookFuncName: probeOpts.Symbol, + } + err = ua.manager.AddHook("", newProbe) + if err != nil { + return fmt.Errorf("error attaching probe %+v: %w", newProbe, err) + } + + ebpf.AddProgramNameMapping(newProbe.ID(), newProbe.EBPFFuncName, ua.name) + ua.pathToAttachedProbes[fpath.HostPath] = append(ua.pathToAttachedProbes[fpath.HostPath], newProbeID) + + if ua.onAttachCallback != nil { + ua.onAttachCallback(newProbe, &fpath) + } + + // Update the probe IDs with the new UID, so that the validator can find them + // correctly (we're changing UIDs every time) + selector.EditProbeIdentificationPair(probeID, newProbeID) + + if ua.config.EnableDetailedLogging { + log.Debugf("Attached probe %v to %s (PID %d)", newProbeID, fpath.HostPath, fpath.PID) + } + } + } + + manager, ok := ua.manager.(*manager.Manager) + if ok { + if err := selector.RunValidator(manager); err != nil { + return fmt.Errorf("error validating probes: %w", err) + } + } + + return nil +} + +func (ua *UprobeAttacher) computeSymbolsToRequest(rules []*AttachRule) ([]SymbolRequest, error) { + var requests []SymbolRequest + for _, rule := range rules { + for _, selector := range rule.ProbesSelector { + _, isBestEffort := selector.(*manager.BestEffort) + for _, selector := range selector.GetProbesIdentificationPairList() { + opts, err := rule.getProbeOptions(selector) + if err != nil { + return nil, fmt.Errorf("error parsing probe name %s: %w", selector.EBPFFuncName, err) + } + + requests = append(requests, SymbolRequest{ + Name: opts.Symbol, + IncludeReturnLocations: opts.IsManualReturn, + BestEffort: isBestEffort, + }) + } + } + } + + return requests, nil +} + +func (ua *UprobeAttacher) detachFromBinary(fpath utils.FilePath) error { + for _, probeID := range ua.pathToAttachedProbes[fpath.HostPath] { + err := ua.manager.DetachHook(probeID) + if err != nil { + return fmt.Errorf("error detaching probe %+v: %w", probeID, err) + } + } + + ua.inspector.Cleanup(fpath) + + return nil +} + +func (ua *UprobeAttacher) getLibrariesFromMapsFile(pid int) ([]string, error) { + mapsPath := filepath.Join(ua.config.ProcRoot, strconv.Itoa(pid), "maps") + mapsFile, err := os.Open(mapsPath) + if err != nil { + return nil, fmt.Errorf("cannot open maps file at %s: %w", mapsPath, err) + } + defer mapsFile.Close() + + scanner := bufio.NewScanner(bufio.NewReader(mapsFile)) + libs := 
make(map[string]struct{}) + for scanner.Scan() { + line := scanner.Text() + cols := strings.Fields(line) + // ensuring we have exactly 6 elements (skip '(deleted)' entries) in the line, and the 4th element (inode) is + // not zero (indicates it is a path, and not an anonymous path). + if len(cols) == 6 && cols[4] != "0" { + libs[cols[5]] = struct{}{} + } + } + + return maps.Keys(libs), nil +} + +func (ua *UprobeAttacher) attachToLibrariesOfPID(pid uint32) error { + registerErrors := make([]error, 0) + successfulMatches := make([]string, 0) + libs, err := ua.getLibrariesFromMapsFile(int(pid)) + if err != nil { + return err + } + for _, libpath := range libs { + err := ua.AttachLibrary(libpath, pid) + + if err == nil { + successfulMatches = append(successfulMatches, libpath) + } else if !errors.Is(err, ErrNoMatchingRule) { + registerErrors = append(registerErrors, err) + } + } + + if len(successfulMatches) == 0 { + if len(registerErrors) == 0 { + return nil // No libraries found to attach + } + return fmt.Errorf("no rules matched for pid %d, errors: %v", pid, registerErrors) + } + if len(registerErrors) > 0 { + return fmt.Errorf("partially hooked (%v), errors while attaching pid %d: %v", successfulMatches, pid, registerErrors) + } + return nil +} diff --git a/pkg/ebpf/uprobes/attacher_test.go b/pkg/ebpf/uprobes/attacher_test.go new file mode 100644 index 0000000000000..e71a62d771306 --- /dev/null +++ b/pkg/ebpf/uprobes/attacher_test.go @@ -0,0 +1,935 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux_bpf + +package uprobes + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "testing" + "time" + + manager "github.com/DataDog/ebpf-manager" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" + "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" + eventmonitortestutil "github.com/DataDog/datadog-agent/pkg/eventmonitor/testutil" + "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" + "github.com/DataDog/datadog-agent/pkg/network/usm/sharedlibraries" + fileopener "github.com/DataDog/datadog-agent/pkg/network/usm/sharedlibraries/testutil" + "github.com/DataDog/datadog-agent/pkg/network/usm/utils" + "github.com/DataDog/datadog-agent/pkg/process/monitor" + procmontestutil "github.com/DataDog/datadog-agent/pkg/process/monitor/testutil" + "github.com/DataDog/datadog-agent/pkg/util/kernel" +) + +// === Tests + +func TestCanCreateAttacher(t *testing.T) { + ua, err := NewUprobeAttacher("mock", AttacherConfig{}, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) +} + +func TestAttachPidExcludesInternal(t *testing.T) { + exe := "datadog-agent/bin/system-probe" + procRoot := CreateFakeProcFS(t, []FakeProcFSEntry{{Pid: 1, Cmdline: exe, Command: exe, Exe: exe}}) + config := AttacherConfig{ + ExcludeTargets: ExcludeInternal, + ProcRoot: procRoot, + } + ua, err := NewUprobeAttacher("mock", config, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) + + err = ua.AttachPIDWithOptions(1, false) + require.ErrorIs(t, err, ErrInternalDDogProcessRejected) +} + +func TestAttachPidExcludesSelf(t *testing.T) { + config := AttacherConfig{ + 
ExcludeTargets: ExcludeSelf, + } + ua, err := NewUprobeAttacher("mock", config, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) + + err = ua.AttachPIDWithOptions(uint32(os.Getpid()), false) + require.ErrorIs(t, err, ErrSelfExcluded) +} + +func TestGetExecutablePath(t *testing.T) { + exe := "/bin/bash" + procRoot := CreateFakeProcFS(t, []FakeProcFSEntry{{Pid: 1, Cmdline: "", Command: exe, Exe: exe}}) + config := AttacherConfig{ + ProcRoot: procRoot, + } + ua, err := NewUprobeAttacher("mock", config, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) + + path, err := ua.getExecutablePath(1) + require.NoError(t, err, "failed to get executable path for existing PID") + require.Equal(t, path, exe) + + path, err = ua.getExecutablePath(404) + require.Error(t, err, "should fail to get executable path for non-existing PID") + require.Empty(t, path, "should return empty path for non-existing PID") +} + +const mapsFileSample = ` +08048000-08049000 r-xp 00000000 03:00 8312 /opt/test +08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test +0804a000-0806b000 rw-p 00000000 00:00 0 [heap] +a7cb1000-a7cb2000 ---p 00000000 00:00 0 +a7cb2000-a7eb2000 rw-p 00000000 00:00 0 +a7eb2000-a7eb3000 ---p 00000000 00:00 0 +a7eb3000-a7ed5000 rw-p 00000000 00:00 0 +a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6 +a8008000-a800a000 r--p 00133000 03:00 4222 /lib/libc.so.6 +a800a000-a800b000 rw-p 00135000 03:00 4222 /lib/libc.so.6 +a800b000-a800e000 rw-p 00000000 00:00 0 +a800e000-a8022000 r-xp 00000000 03:00 14462 /lib/libpthread.so.0 +a8022000-a8023000 r--p 00013000 03:00 14462 /lib/libpthread.so.0 +a8023000-a8024000 rw-p 00014000 03:00 14462 /lib/libpthread.so.0 +a8024000-a8027000 rw-p 00000000 00:00 0 +a8027000-a8043000 r-xp 00000000 03:00 8317 /lib/ld-linux.so.2 +a8043000-a8044000 r--p 0001b000 03:00 8317 /lib/ld-linux.so.2 +a8044000-a8045000 rw-p 0001c000 03:00 8317 /lib/ld-linux.so.2 +aff35000-aff4a000 rw-p 00000000 00:00 0 [stack] +ffffe000-fffff000 r-xp 00000000 00:00 0 [vdso] +01c00000-02000000 rw-p 00000000 00:0d 6123886 /anon_hugepage (deleted) +` + +func TestGetLibrariesFromMapsFile(t *testing.T) { + pid := 1 + procRoot := CreateFakeProcFS(t, []FakeProcFSEntry{{Pid: uint32(pid), Maps: mapsFileSample}}) + config := AttacherConfig{ + ProcRoot: procRoot, + } + ua, err := NewUprobeAttacher("mock", config, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) + + libs, err := ua.getLibrariesFromMapsFile(pid) + require.NoError(t, err, "failed to get libraries from maps file") + require.NotEmpty(t, libs, "should return libraries from maps file") + expectedLibs := []string{"/opt/test", "/lib/libc.so.6", "/lib/libpthread.so.0", "/lib/ld-linux.so.2"} + require.ElementsMatch(t, expectedLibs, libs) +} + +func TestComputeRequestedSymbols(t *testing.T) { + ua, err := NewUprobeAttacher("mock", AttacherConfig{}, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) + + selectorsOnlyAllOf := []manager.ProbesSelector{ + &manager.AllOf{ + Selectors: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect"}}, + }, + }, + } + + t.Run("OnlyMandatory", func(tt *testing.T) { + rules := []*AttachRule{{ProbesSelector: selectorsOnlyAllOf}} + requested, err := ua.computeSymbolsToRequest(rules) + require.NoError(tt, err) + require.ElementsMatch(tt, []SymbolRequest{{Name: "SSL_connect"}}, requested) + }) + + selectorsBestEffortAndMandatory := 
[]manager.ProbesSelector{ + &manager.AllOf{ + Selectors: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect"}}, + }, + }, + &manager.BestEffort{ + Selectors: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__ThisFunctionDoesNotExistEver"}}, + }, + }, + } + + t.Run("MandatoryAndBestEffort", func(tt *testing.T) { + rules := []*AttachRule{{ProbesSelector: selectorsBestEffortAndMandatory}} + requested, err := ua.computeSymbolsToRequest(rules) + require.NoError(tt, err) + require.ElementsMatch(tt, []SymbolRequest{{Name: "SSL_connect"}, {Name: "ThisFunctionDoesNotExistEver", BestEffort: true}}, requested) + }) + + selectorsBestEffort := []manager.ProbesSelector{ + &manager.BestEffort{ + Selectors: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect"}}, + &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__ThisFunctionDoesNotExistEver"}}, + }, + }, + } + + t.Run("OnlyBestEffort", func(tt *testing.T) { + rules := []*AttachRule{{ProbesSelector: selectorsBestEffort}} + requested, err := ua.computeSymbolsToRequest(rules) + require.NoError(tt, err) + require.ElementsMatch(tt, []SymbolRequest{{Name: "SSL_connect", BestEffort: true}, {Name: "ThisFunctionDoesNotExistEver", BestEffort: true}}, requested) + }) + + selectorsWithReturnFunctions := []manager.ProbesSelector{ + &manager.AllOf{ + Selectors: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect__return"}}, + }, + }, + } + + t.Run("SelectorsWithReturnFunctions", func(tt *testing.T) { + rules := []*AttachRule{{ProbesSelector: selectorsWithReturnFunctions}} + requested, err := ua.computeSymbolsToRequest(rules) + require.NoError(tt, err) + require.ElementsMatch(tt, []SymbolRequest{{Name: "SSL_connect", IncludeReturnLocations: true}}, requested) + }) +} + +func TestStartAndStopWithoutLibraryWatcher(t *testing.T) { + ua, err := NewUprobeAttacher("mock", AttacherConfig{}, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) + + err = ua.Start() + require.NoError(t, err) + + ua.Stop() +} + +func TestStartAndStopWithLibraryWatcher(t *testing.T) { + ebpfCfg := ddebpf.NewConfig() + require.NotNil(t, ebpfCfg) + if !sharedlibraries.IsSupported(ebpfCfg) { + t.Skip("Kernel version does not support shared libraries") + return + } + + rules := []*AttachRule{{LibraryNameRegex: regexp.MustCompile(`libssl.so`), Targets: AttachToSharedLibraries}} + ua, err := NewUprobeAttacher("mock", AttacherConfig{Rules: rules, EbpfConfig: ebpfCfg}, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) + require.True(t, ua.handlesLibraries()) + + err = ua.Start() + require.NoError(t, err) + require.NotNil(t, ua.soWatcher) + + ua.Stop() +} + +func TestRuleMatches(t *testing.T) { + t.Run("Library", func(tt *testing.T) { + rule := AttachRule{ + LibraryNameRegex: regexp.MustCompile(`libssl.so`), + Targets: AttachToSharedLibraries, + } + require.True(tt, rule.matchesLibrary("pkg/network/usm/testdata/site-packages/dd-trace/libssl.so.arm64")) + require.False(tt, rule.matchesExecutable("pkg/network/usm/testdata/site-packages/dd-trace/libssl.so.arm64", nil)) + }) + + t.Run("Executable", func(tt *testing.T) { + rule := AttachRule{ + Targets: 
AttachToExecutable, + } + require.False(tt, rule.matchesLibrary("/bin/bash")) + require.True(tt, rule.matchesExecutable("/bin/bash", nil)) + }) + + t.Run("ExecutableWithFuncFilter", func(tt *testing.T) { + rule := AttachRule{ + Targets: AttachToExecutable, + ExecutableFilter: func(path string, _ *ProcInfo) bool { + return strings.Contains(path, "bash") + }, + } + require.False(tt, rule.matchesLibrary("/bin/bash")) + require.True(tt, rule.matchesExecutable("/bin/bash", nil)) + require.False(tt, rule.matchesExecutable("/bin/thing", nil)) + }) +} + +func TestMonitor(t *testing.T) { + ebpfCfg := ddebpf.NewConfig() + require.NotNil(t, ebpfCfg) + if !sharedlibraries.IsSupported(ebpfCfg) { + t.Skip("Kernel version does not support shared libraries") + return + } + + config := AttacherConfig{ + Rules: []*AttachRule{{ + LibraryNameRegex: regexp.MustCompile(`libssl.so`), + Targets: AttachToExecutable | AttachToSharedLibraries, + }}, + ProcessMonitorEventStream: false, + EbpfConfig: ebpfCfg, + } + ua, err := NewUprobeAttacher("mock", config, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) + + mockRegistry := &MockFileRegistry{} + ua.fileRegistry = mockRegistry + + // Tell mockRegistry to return on any calls, we will check the values later + mockRegistry.On("Clear").Return() + mockRegistry.On("Unregister", mock.Anything).Return(nil) + mockRegistry.On("Register", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + lib := getLibSSLPath(t) + + require.NoError(t, ua.Start()) + t.Cleanup(ua.Stop) + + cmd, err := fileopener.OpenFromAnotherProcess(t, lib) + require.NoError(t, err) + require.Eventually(t, func() bool { + return methodHasBeenCalledAtLeastTimes(mockRegistry, "Register", 2) + }, 1500*time.Millisecond, 10*time.Millisecond, "received calls %v", mockRegistry.Calls) + + mockRegistry.AssertCalled(t, "Register", lib, uint32(cmd.Process.Pid), mock.Anything, mock.Anything, mock.Anything) + mockRegistry.AssertCalled(t, "Register", cmd.Path, uint32(cmd.Process.Pid), mock.Anything, mock.Anything, mock.Anything) +} + +func TestSync(t *testing.T) { + selfPID, err := kernel.RootNSPID() + require.NoError(t, err) + rules := []*AttachRule{{ + Targets: AttachToExecutable | AttachToSharedLibraries, + LibraryNameRegex: regexp.MustCompile(`.*`), + ExecutableFilter: func(path string, _ *ProcInfo) bool { return !strings.Contains(path, "donttrack") }, + }} + + t.Run("DetectsExistingProcesses", func(tt *testing.T) { + procs := []FakeProcFSEntry{ + {Pid: 1, Cmdline: "/bin/bash", Command: "/bin/bash", Exe: "/bin/bash"}, + {Pid: 2, Cmdline: "/bin/bash", Command: "/bin/bash", Exe: "/bin/bash"}, + {Pid: 3, Cmdline: "/bin/donttrack", Command: "/bin/donttrack", Exe: "/bin/donttrack"}, + {Pid: uint32(selfPID), Cmdline: "datadog-agent/bin/system-probe", Command: "sysprobe", Exe: "sysprobe"}, + } + procFS := CreateFakeProcFS(t, procs) + + config := AttacherConfig{ + ProcRoot: procFS, + Rules: rules, + EnablePeriodicScanNewProcesses: true, + } + + ua, err := NewUprobeAttacher("mock", config, &MockManager{}, nil, nil) + require.NoError(tt, err) + require.NotNil(tt, ua) + + mockRegistry := &MockFileRegistry{} + ua.fileRegistry = mockRegistry + + // Tell mockRegistry which two processes to expect + mockRegistry.On("Register", "/bin/bash", uint32(1), mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockRegistry.On("Register", "/bin/bash", uint32(2), mock.Anything, mock.Anything, mock.Anything).Return(nil) + + err = ua.Sync(true, false) + require.NoError(tt, 
err) + + mockRegistry.AssertExpectations(tt) + }) + + t.Run("RemovesDeletedProcesses", func(tt *testing.T) { + procs := []FakeProcFSEntry{ + {Pid: 1, Cmdline: "/bin/bash", Command: "/bin/bash", Exe: "/bin/bash"}, + {Pid: 2, Cmdline: "/bin/bash", Command: "/bin/bash", Exe: "/bin/bash"}, + {Pid: 3, Cmdline: "/bin/donttrack", Command: "/bin/donttrack", Exe: "/bin/donttrack"}, + {Pid: uint32(selfPID), Cmdline: "datadog-agent/bin/system-probe", Command: "sysprobe", Exe: "sysprobe"}, + } + procFS := CreateFakeProcFS(t, procs) + + config := AttacherConfig{ + ProcRoot: procFS, + Rules: rules, + EnablePeriodicScanNewProcesses: true, + } + + ua, err := NewUprobeAttacher("mock", config, &MockManager{}, nil, nil) + require.NoError(tt, err) + require.NotNil(tt, ua) + + mockRegistry := &MockFileRegistry{} + ua.fileRegistry = mockRegistry + + // Tell mockRegistry which two processes to expect + mockRegistry.On("Register", "/bin/bash", uint32(1), mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockRegistry.On("Register", "/bin/bash", uint32(2), mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockRegistry.On("GetRegisteredProcesses").Return(map[uint32]struct{}{}) + + err = ua.Sync(true, true) + require.NoError(tt, err) + mockRegistry.AssertExpectations(tt) + + // Now remove one process + require.NoError(t, os.RemoveAll(filepath.Join(procFS, "2"))) + mockRegistry.ExpectedCalls = nil // Clear expected calls + mockRegistry.On("GetRegisteredProcesses").Return(map[uint32]struct{}{1: {}, 2: {}}) + mockRegistry.On("Unregister", uint32(2)).Return(nil) + + require.NoError(t, ua.Sync(true, true)) + mockRegistry.AssertExpectations(tt) + }) +} + +func TestParseSymbolFromEBPFProbeName(t *testing.T) { + t.Run("ValidName", func(tt *testing.T) { + name := "uprobe__SSL_connect" + symbol, manualReturn, err := parseSymbolFromEBPFProbeName(name) + require.NoError(tt, err) + require.False(tt, manualReturn) + require.Equal(tt, "SSL_connect", symbol) + }) + t.Run("ValidNameWithReturnMarker", func(tt *testing.T) { + name := "uprobe__SSL_connect__return" + symbol, manualReturn, err := parseSymbolFromEBPFProbeName(name) + require.NoError(tt, err) + require.True(tt, manualReturn) + require.Equal(tt, "SSL_connect", symbol) + }) + t.Run("InvalidNameWithUnrecognizedThirdPart", func(tt *testing.T) { + name := "uprobe__SSL_connect__something" + _, _, err := parseSymbolFromEBPFProbeName(name) + require.Error(tt, err) + }) + t.Run("InvalidNameNoSymbol", func(tt *testing.T) { + name := "nothing" + _, _, err := parseSymbolFromEBPFProbeName(name) + require.Error(tt, err) + }) +} + +func TestAttachToBinaryAndDetach(t *testing.T) { + proc := FakeProcFSEntry{ + Pid: 1, + Cmdline: "/bin/bash", + Exe: "/bin/bash", + } + procFS := CreateFakeProcFS(t, []FakeProcFSEntry{proc}) + + config := AttacherConfig{ + ProcRoot: procFS, + Rules: []*AttachRule{ + { + Targets: AttachToExecutable, + ProbesSelector: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect"}}, + }, + }, + }, + } + + mockMan := &MockManager{} + inspector := &MockBinaryInspector{} + ua, err := NewUprobeAttacher("mock", config, mockMan, nil, inspector) + require.NoError(t, err) + require.NotNil(t, ua) + + target := utils.FilePath{ + HostPath: proc.Exe, + PID: proc.Pid, + } + + // Tell the inspector to return a simple symbol + symbolToAttach := bininspect.FunctionMetadata{EntryLocation: 0x1234} + inspector.On("Inspect", target, 
mock.Anything).Return(map[string]bininspect.FunctionMetadata{"SSL_connect": symbolToAttach}, true, nil) + inspector.On("Cleanup", mock.Anything).Return(nil) + + // Tell the manager to return no probe when finding an existing one + var nilProbe *manager.Probe // we can't just pass nil directly, if we do that the mock cannot convert it to *manager.Probe + mockMan.On("GetProbe", mock.Anything).Return(nilProbe, false) + + // Tell the manager to accept the probe + uid := "1hipfd0" // this is the UID that the manager will generate, from a path identifier with 0/0 as device/inode + expectedProbe := &manager.Probe{ + ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect", UID: uid}, + BinaryPath: target.HostPath, + UprobeOffset: symbolToAttach.EntryLocation, + HookFuncName: "SSL_connect", + } + mockMan.On("AddHook", mock.Anything, expectedProbe).Return(nil) + + err = ua.attachToBinary(target, config.Rules, NewProcInfo(procFS, proc.Pid)) + require.NoError(t, err) + mockMan.AssertExpectations(t) + + mockMan.On("DetachHook", expectedProbe.ProbeIdentificationPair).Return(nil) + err = ua.detachFromBinary(target) + require.NoError(t, err) + inspector.AssertExpectations(t) + mockMan.AssertExpectations(t) +} + +func TestAttachToBinaryAtReturnLocation(t *testing.T) { + proc := FakeProcFSEntry{ + Pid: 1, + Cmdline: "/bin/bash", + Exe: "/bin/bash", + } + procFS := CreateFakeProcFS(t, []FakeProcFSEntry{proc}) + + config := AttacherConfig{ + ProcRoot: procFS, + Rules: []*AttachRule{ + { + Targets: AttachToExecutable, + ProbesSelector: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect__return"}}, + }, + }, + }, + } + + mockMan := &MockManager{} + inspector := &MockBinaryInspector{} + ua, err := NewUprobeAttacher("mock", config, mockMan, nil, inspector) + require.NoError(t, err) + require.NotNil(t, ua) + + target := utils.FilePath{ + HostPath: proc.Exe, + PID: proc.Pid, + } + + // Tell the inspector to return a simple symbol + symbolToAttach := bininspect.FunctionMetadata{EntryLocation: 0x1234, ReturnLocations: []uint64{0x0, 0x1}} + inspector.On("Inspect", target, mock.Anything).Return(map[string]bininspect.FunctionMetadata{"SSL_connect": symbolToAttach}, true, nil) + + // Tell the manager to return no probe when finding an existing one + var nilProbe *manager.Probe // we can't just pass nil directly, if we do that the mock cannot convert it to *manager.Probe + mockMan.On("GetProbe", mock.Anything).Return(nilProbe, false) + + // Tell the manager to accept the probe + uidBase := "1hipf" // this is the UID that the manager will generate, from a path identifier with 0/0 as device/inode + for n := 0; n < len(symbolToAttach.ReturnLocations); n++ { + expectedProbe := &manager.Probe{ + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: "uprobe__SSL_connect__return", + UID: fmt.Sprintf("%sr%d", uidBase, n)}, + BinaryPath: target.HostPath, + UprobeOffset: symbolToAttach.ReturnLocations[n], + HookFuncName: "SSL_connect", + } + mockMan.On("AddHook", mock.Anything, expectedProbe).Return(nil) + } + + err = ua.attachToBinary(target, config.Rules, NewProcInfo(procFS, proc.Pid)) + require.NoError(t, err) + inspector.AssertExpectations(t) + mockMan.AssertExpectations(t) +} + +const mapsFileWithSSL = ` +08048000-08049000 r-xp 00000000 03:00 8312 /usr/lib/libssl.so +` + +func TestAttachToLibrariesOfPid(t *testing.T) { + proc := FakeProcFSEntry{ + Pid: 1, + Cmdline: "/bin/bash", + 
Exe: "/bin/bash", + Maps: mapsFileWithSSL, + } + procFS := CreateFakeProcFS(t, []FakeProcFSEntry{proc}) + + config := AttacherConfig{ + ProcRoot: procFS, + Rules: []*AttachRule{ + { + LibraryNameRegex: regexp.MustCompile(`libssl.so`), + ProbesSelector: []manager.ProbesSelector{ + &manager.ProbeSelector{ + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: "uprobe__SSL_connect", + }, + }, + }, + Targets: AttachToSharedLibraries, + }, + { + LibraryNameRegex: regexp.MustCompile(`libtls.so`), + ProbesSelector: []manager.ProbesSelector{ + &manager.ProbeSelector{ + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: "uprobe__TLS_connect", + }, + }, + }, + Targets: AttachToSharedLibraries, + }, + }, + } + + mockMan := &MockManager{} + inspector := &MockBinaryInspector{} + registry := &MockFileRegistry{} + ua, err := NewUprobeAttacher("mock", config, mockMan, nil, inspector) + require.NoError(t, err) + require.NotNil(t, ua) + ua.fileRegistry = registry + + target := utils.FilePath{ + HostPath: "/usr/lib/libssl.so", + PID: proc.Pid, + } + + // Tell the inspector to return a simple symbol + symbolToAttach := bininspect.FunctionMetadata{EntryLocation: 0x1234} + inspector.On("Inspect", target, mock.Anything).Return(map[string]bininspect.FunctionMetadata{"SSL_connect": symbolToAttach}, true, nil) + + // Tell the manager to return no probe when finding an existing one + var nilProbe *manager.Probe // we can't just pass nil directly, if we do that the mock cannot convert it to *manager.Probe + mockMan.On("GetProbe", mock.Anything).Return(nilProbe, false) + + // Tell the manager to accept the probe + uid := "1hipfd0" // this is the UID that the manager will generate, from a path identifier with 0/0 as device/inode + expectedProbe := &manager.Probe{ + ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect", UID: uid}, + BinaryPath: target.HostPath, + UprobeOffset: symbolToAttach.EntryLocation, + HookFuncName: "SSL_connect", + } + mockMan.On("AddHook", mock.Anything, expectedProbe).Return(nil) + + // Tell the registry to expect the process + registry.On("Register", target.HostPath, uint32(proc.Pid), mock.Anything, mock.Anything, mock.Anything).Return(nil) + + // if this function calls the manager adding a probe with a different name than the one we requested, the test + // will fail + err = ua.attachToLibrariesOfPID(proc.Pid) + require.NoError(t, err) + + // We need to retrieve the calls from the registry and manually call the callback + // to simulate the process being registered + registry.AssertExpectations(t) + cb := registry.Calls[0].Arguments[2].(utils.Callback) + require.NoError(t, cb(target)) + + inspector.AssertExpectations(t) + mockMan.AssertExpectations(t) +} + +type attachedProbe struct { + probe *manager.Probe + fpath *utils.FilePath +} + +func (ap *attachedProbe) String() string { + return fmt.Sprintf("attachedProbe{probe: %s, PID: %d, path: %s}", ap.probe.EBPFFuncName, ap.fpath.PID, ap.fpath.HostPath) +} + +func stringifyAttachedProbes(probes []attachedProbe) []string { + var result []string + for _, ap := range probes { + result = append(result, ap.String()) + } + return result +} + +func TestUprobeAttacher(t *testing.T) { + lib := getLibSSLPath(t) + ebpfCfg := ddebpf.NewConfig() + require.NotNil(t, ebpfCfg) + + if !sharedlibraries.IsSupported(ebpfCfg) { + t.Skip("Kernel version does not support shared libraries") + return + } + + buf, err := bytecode.GetReader(ebpfCfg.BPFDir, "uprobe_attacher-test.o") + 
require.NoError(t, err) + t.Cleanup(func() { buf.Close() }) + + connectProbeID := manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect"} + mainProbeID := manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__main"} + + mgr := manager.Manager{} + + attacherCfg := AttacherConfig{ + Rules: []*AttachRule{ + { + LibraryNameRegex: regexp.MustCompile(`libssl.so`), + Targets: AttachToSharedLibraries, + ProbesSelector: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: connectProbeID}, + }, + }, + { + Targets: AttachToExecutable, + ProbesSelector: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: mainProbeID}, + }, + ProbeOptionsOverride: map[string]ProbeOptions{ + mainProbeID.EBPFFuncName: { + IsManualReturn: false, + Symbol: "main.main", + }, + }, + }, + }, + ExcludeTargets: ExcludeInternal | ExcludeSelf, + EbpfConfig: ebpfCfg, + EnableDetailedLogging: true, + } + + var attachedProbes []attachedProbe + + callback := func(probe *manager.Probe, fpath *utils.FilePath) { + attachedProbes = append(attachedProbes, attachedProbe{probe: probe, fpath: fpath}) + } + + ua, err := NewUprobeAttacher("test", attacherCfg, &mgr, callback, &NativeBinaryInspector{}) + require.NoError(t, err) + require.NotNil(t, ua) + + require.NoError(t, mgr.InitWithOptions(buf, manager.Options{})) + require.NoError(t, mgr.Start()) + t.Cleanup(func() { mgr.Stop(manager.CleanAll) }) + require.NoError(t, ua.Start()) + t.Cleanup(ua.Stop) + + cmd, err := fileopener.OpenFromAnotherProcess(t, lib) + require.NoError(t, err) + + var connectProbe, mainProbe *attachedProbe + require.Eventually(t, func() bool { + // Find the probes we want to attach. + // Note that we might attach to other processes, so filter by ours only + for _, ap := range attachedProbes { + if ap.probe.EBPFFuncName == "uprobe__SSL_connect" && ap.fpath.PID == uint32(cmd.Process.Pid) { + connectProbe = &ap + } else if ap.probe.EBPFFuncName == "uprobe__main" && ap.fpath.PID == uint32(cmd.Process.Pid) { + mainProbe = &ap + } + } + + return connectProbe != nil && mainProbe != nil + }, 5*time.Second, 50*time.Millisecond, "expected to attach 2 probes, got %d: %v (%v)", len(attachedProbes), attachedProbes, stringifyAttachedProbes(attachedProbes)) + + require.NotNil(t, connectProbe) + // Allow suffix, as sometimes the path reported is /proc//root/ + require.True(t, strings.HasSuffix(connectProbe.fpath.HostPath, lib), "expected to attach to %s, got %s", lib, connectProbe.fpath.HostPath) + require.Equal(t, uint32(cmd.Process.Pid), connectProbe.fpath.PID) + + require.NotNil(t, mainProbe) + require.Equal(t, uint32(cmd.Process.Pid), mainProbe.fpath.PID) +} + +func launchProcessMonitor(t *testing.T, useEventStream bool) { + pm := monitor.GetProcessMonitor() + t.Cleanup(pm.Stop) + require.NoError(t, pm.Initialize(useEventStream)) + if useEventStream { + eventmonitortestutil.StartEventMonitor(t, procmontestutil.RegisterProcessMonitorEventConsumer) + } +} + +func createTempTestFile(t *testing.T, name string) (string, utils.PathIdentifier) { + fullPath := filepath.Join(t.TempDir(), name) + + f, err := os.Create(fullPath) + f.WriteString("foobar") + require.NoError(t, err) + f.Close() + t.Cleanup(func() { + os.RemoveAll(fullPath) + }) + + pathID, err := utils.NewPathIdentifier(fullPath) + require.NoError(t, err) + + return fullPath, pathID +} + +type SharedLibrarySuite struct { + suite.Suite +} + +func TestAttacherSharedLibrary(t *testing.T) { + ebpftest.TestBuildModes(t, []ebpftest.BuildMode{ebpftest.Prebuilt, 
ebpftest.RuntimeCompiled, ebpftest.CORE}, "", func(tt *testing.T) { + if !sharedlibraries.IsSupported(ddebpf.NewConfig()) { + tt.Skip("shared library tracing not supported for this platform") + } + + tt.Run("netlink", func(ttt *testing.T) { + launchProcessMonitor(ttt, false) + suite.Run(ttt, new(SharedLibrarySuite)) + }) + + tt.Run("event stream", func(ttt *testing.T) { + launchProcessMonitor(ttt, true) + suite.Run(ttt, new(SharedLibrarySuite)) + }) + }) +} + +func (s *SharedLibrarySuite) TestSingleFile() { + t := s.T() + ebpfCfg := ddebpf.NewConfig() + + fooPath1, _ := createTempTestFile(t, "foo-libssl.so") + + attachCfg := AttacherConfig{ + Rules: []*AttachRule{{ + LibraryNameRegex: regexp.MustCompile(`foo-libssl.so`), + Targets: AttachToSharedLibraries, + }}, + EbpfConfig: ebpfCfg, + } + + ua, err := NewUprobeAttacher("test", attachCfg, &MockManager{}, nil, nil) + require.NoError(t, err) + + mockRegistry := &MockFileRegistry{} + ua.fileRegistry = mockRegistry + + // Tell mockRegistry to return on any calls, we will check the values later + mockRegistry.On("Clear").Return() + mockRegistry.On("Unregister", mock.Anything).Return(nil) + mockRegistry.On("Register", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + require.NoError(t, ua.Start()) + t.Cleanup(ua.Stop) + + // open files + cmd, err := fileopener.OpenFromAnotherProcess(t, fooPath1) + require.NoError(t, err) + require.Eventually(t, func() bool { + return methodHasBeenCalledTimes(mockRegistry, "Register", 1) + }, 1500*time.Millisecond, 10*time.Millisecond, "received calls %v", mockRegistry.Calls) + + mockRegistry.AssertCalled(t, "Register", fooPath1, uint32(cmd.Process.Pid), mock.Anything, mock.Anything, mock.Anything) + + mockRegistry.Calls = nil + require.NoError(t, cmd.Process.Kill()) + + require.Eventually(t, func() bool { + // Other processes might have finished and forced the Unregister call to the registry + return methodHasBeenCalledWithPredicate(mockRegistry, "Unregister", func(call mock.Call) bool { + return call.Arguments[0].(uint32) == uint32(cmd.Process.Pid) + }) + }, time.Second*10, 200*time.Millisecond, "received calls %v", mockRegistry.Calls) + + mockRegistry.AssertCalled(t, "Unregister", uint32(cmd.Process.Pid)) +} + +func (s *SharedLibrarySuite) TestDetectionWithPIDAndRootNamespace() { + t := s.T() + ebpfCfg := ddebpf.NewConfig() + + _, err := os.Stat("/usr/bin/busybox") + if err != nil { + t.Skip("skip for the moment as some distro are not friendly with busybox package") + } + + tempDir := t.TempDir() + root := filepath.Join(tempDir, "root") + err = os.MkdirAll(root, 0755) + require.NoError(t, err) + + libpath := "/fooroot-crypto.so" + + err = exec.Command("cp", "/usr/bin/busybox", root+"/ash").Run() + require.NoError(t, err) + err = exec.Command("cp", "/usr/bin/busybox", root+"/sleep").Run() + require.NoError(t, err) + + attachCfg := AttacherConfig{ + Rules: []*AttachRule{{ + LibraryNameRegex: regexp.MustCompile(`fooroot-crypto.so`), + Targets: AttachToSharedLibraries, + }}, + EbpfConfig: ebpfCfg, + } + + ua, err := NewUprobeAttacher("test", attachCfg, &MockManager{}, nil, nil) + require.NoError(t, err) + + mockRegistry := &MockFileRegistry{} + ua.fileRegistry = mockRegistry + + // Tell mockRegistry to return on any calls, we will check the values later + mockRegistry.On("Clear").Return() + mockRegistry.On("Unregister", mock.Anything).Return(nil) + mockRegistry.On("Register", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + 
require.NoError(t, ua.Start()) + t.Cleanup(ua.Stop) + + time.Sleep(10 * time.Millisecond) + // simulate a slow (1 second) open, write, and close of the file + // in new PID and mount namespaces + o, err := exec.Command("unshare", "--fork", "--pid", "-R", root, "/ash", "-c", fmt.Sprintf("sleep 1 > %s", libpath)).CombinedOutput() + if err != nil { + t.Log(err, string(o)) + } + require.NoError(t, err) + + time.Sleep(10 * time.Millisecond) + + require.Eventually(t, func() bool { + return methodHasBeenCalledTimes(mockRegistry, "Register", 1) + }, time.Second*10, 100*time.Millisecond, "received calls %v", mockRegistry.Calls) + + // assert that the attacher detected fooroot-crypto.so being opened and triggered the callback + foundCall := false + for _, call := range mockRegistry.Calls { + if call.Method == "Register" { + args := call.Arguments + require.True(t, strings.HasSuffix(args[0].(string), libpath)) + foundCall = true + } + } + require.True(t, foundCall) + + // the library path must not exist on the host + _, err = os.Stat(libpath) + require.Error(t, err) +} + +func methodHasBeenCalledTimes(registry *MockFileRegistry, methodName string, times int) bool { + calls := 0 + for _, call := range registry.Calls { + if call.Method == methodName { + calls++ + } + } + return calls == times +} + +func methodHasBeenCalledAtLeastTimes(registry *MockFileRegistry, methodName string, times int) bool { + calls := 0 + for _, call := range registry.Calls { + if call.Method == methodName { + calls++ + } + } + return calls >= times +} + +func methodHasBeenCalledWithPredicate(registry *MockFileRegistry, methodName string, predicate func(mock.Call) bool) bool { + for _, call := range registry.Calls { + if call.Method == methodName && predicate(call) { + return true + } + } + return false +} diff --git a/pkg/ebpf/uprobes/doc.go b/pkg/ebpf/uprobes/doc.go new file mode 100644 index 0000000000000..887d653ae8d82 --- /dev/null +++ b/pkg/ebpf/uprobes/doc.go @@ -0,0 +1,60 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +/* +Package uprobes contains methods to help handle the attachment of uprobes to +userspace programs. + +The main type for this package is the UprobeAttacher type, created with +NewUprobeAttacher. The main configuration it requires is a list of rules that +define how to match the possible targets (shared libraries and/or executables) +and which probes to attach to them. Example usage: + + connectProbeID := manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect"} + mainProbeID := manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__main"} + + mgr := manager.Manager{} + + attacherCfg := AttacherConfig{ + Rules: []*AttachRule{ + { + LibraryNameRegex: regexp.MustCompile(`libssl.so`), + Targets: AttachToSharedLibraries, + ProbesSelector: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: connectProbeID}, + }, + }, + { + Targets: AttachToExecutable, + ProbesSelector: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: mainProbeID}, + }, + }, + }, + ExcludeTargets: ExcludeInternal | ExcludeSelf, + EbpfConfig: ebpfCfg, + } + + ua, err := NewUprobeAttacher("test", attacherCfg, &mgr, callback, &NativeBinaryInspector{}) + ua.Start() + +Once started, the attacher monitors new processes and `open` calls for new +shared libraries.
For the first task it uses pkg/process/monitor/ProcessMonitor, +and for the second it uses the shared-libraries program in +pkg/network/usm/sharedlibraries. + +# Notes and things to take into account + + - When adding new probes, be sure to add the corresponding code to + match the libraries in + pkg/network/ebpf/c/shared-libraries/probes.h:do_sys_open_helper_exit, as an + initial filtering is performed there. + + - If multiple rules match a binary file, and we fail to attach the required probes for one of them, + the whole attach operation will be considered as failed, and the probes will be detached. If you want + to control which probes are optional and which are mandatory, you can use the manager.AllOf/manager.BestEffort + selectors in a single rule. +*/ +package uprobes diff --git a/pkg/ebpf/uprobes/inspector.go b/pkg/ebpf/uprobes/inspector.go new file mode 100644 index 0000000000000..16df19346b5e5 --- /dev/null +++ b/pkg/ebpf/uprobes/inspector.go @@ -0,0 +1,135 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux_bpf + +package uprobes + +import ( + "debug/elf" + "errors" + "fmt" + "runtime" + + manager "github.com/DataDog/ebpf-manager" + + "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" + "github.com/DataDog/datadog-agent/pkg/network/usm/utils" + "github.com/DataDog/datadog-agent/pkg/util/common" +) + +// BinaryInspector implementors are responsible for extracting the metadata required to attach from a binary. +type BinaryInspector interface { + // Inspect returns the metadata required to attach to a binary. The first + // return is a map of symbol names to their corresponding metadata, the + // second return is a boolean indicating whether this binary is compatible + // and can be attached or not. It is encouraged to return early if the + // binary is not compatible, to avoid unnecessary work. In the future, the + // first and second return values should be merged into a single struct, but + // for now this allows us to keep the API compatible with the existing + // implementation. + Inspect(fpath utils.FilePath, requests []SymbolRequest) (map[string]bininspect.FunctionMetadata, bool, error) + + // Cleanup is called when a certain file path is not needed anymore, the implementation can clean up + // any resources associated with the file path. + Cleanup(fpath utils.FilePath) +} + +// SymbolRequest represents a request for symbols and associated data from a binary +type SymbolRequest struct { + // Name of the symbol to request + Name string + // BestEffort indicates that the symbol is not mandatory, and the inspector should not return an error if it is not found + BestEffort bool + // IncludeReturnLocations indicates that the inspector should also include the return locations of the function, for manual + // attachment into those return points instead of using uretprobes. + IncludeReturnLocations bool +} + +// NativeBinaryInspector is a BinaryInspector that uses the ELF format to extract the metadata directly from native functions. +type NativeBinaryInspector struct { +} + +// Ensure NativeBinaryInspector implements BinaryInspector +var _ BinaryInspector = &NativeBinaryInspector{} + +// Inspect extracts the metadata required to attach to a binary from the ELF file at the given path. 
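A minimal illustrative sketch of the mandatory versus best-effort distinction described above (SymbolRequest.BestEffort here, and the manager.AllOf/manager.BestEffort note in doc.go), assuming the AttachRule and selector types from this package; the uprobe__SSL_set_bio probe name is hypothetical:

	sslConnectID := manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect"}
	sslSetBioID := manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_set_bio"} // hypothetical probe

	rule := &AttachRule{
		LibraryNameRegex: regexp.MustCompile(`libssl.so`),
		Targets:          AttachToSharedLibraries,
		ProbesSelector: []manager.ProbesSelector{
			// Mandatory: attachment to a matching binary fails if SSL_connect cannot be hooked.
			&manager.AllOf{Selectors: []manager.ProbesSelector{
				&manager.ProbeSelector{ProbeIdentificationPair: sslConnectID},
			}},
			// Best effort: hooked only when the symbol is present, never fails the rule on its own.
			&manager.BestEffort{Selectors: []manager.ProbesSelector{
				&manager.ProbeSelector{ProbeIdentificationPair: sslSetBioID},
			}},
		},
	}

A rule like this feeds the same AttacherConfig shown in doc.go; per the notes there, only mandatory selectors can cause the attach operation for a matching binary to be rolled back.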
+func (p *NativeBinaryInspector) Inspect(fpath utils.FilePath, requests []SymbolRequest) (map[string]bininspect.FunctionMetadata, bool, error) { + path := fpath.HostPath + elfFile, err := elf.Open(path) + if err != nil { + return nil, false, err + } + defer elfFile.Close() + + // This only allows amd64 and arm64 and not the 32-bit variants, but that + // is fine since we don't monitor 32-bit applications at all in the shared + // library watcher since compat syscalls aren't supported by the syscall + // trace points. We do actually monitor 32-bit applications for istio and + // nodejs monitoring, but our uprobe hooks only properly support 64-bit + // applications, so there's no harm in rejecting 32-bit applications here. + arch, err := bininspect.GetArchitecture(elfFile) + if err != nil { + return nil, false, fmt.Errorf("cannot get architecture of %s: %w", path, err) + } + + // Ignore foreign architectures. This can happen when running stuff under + // qemu-user, for example, and installing a uprobe will lead to segfaults + // since the foreign instructions will be patched with the native break + // instruction. + if string(arch) != runtime.GOARCH { + return nil, false, nil + } + + mandatorySymbols := make(common.StringSet, len(requests)) + bestEffortSymbols := make(common.StringSet, len(requests)) + + for _, req := range requests { + if req.BestEffort { + bestEffortSymbols.Add(req.Name) + } else { + mandatorySymbols.Add(req.Name) + } + + if req.IncludeReturnLocations { + return nil, false, errors.New("return locations are not supported by the native binary inspector") + } + } + + symbolMap, err := bininspect.GetAllSymbolsInSetByName(elfFile, mandatorySymbols) + if err != nil { + return nil, false, err + } + /* Best effort to resolve symbols, so we don't care about the error */ + symbolMapBestEffort, _ := bininspect.GetAllSymbolsInSetByName(elfFile, bestEffortSymbols) + + funcMap := make(map[string]bininspect.FunctionMetadata, len(symbolMap)+len(symbolMapBestEffort)) + for _, symMap := range []map[string]elf.Symbol{symbolMap, symbolMapBestEffort} { + for symbolName, symbol := range symMap { + m, err := p.symbolToFuncMetadata(elfFile, symbol) + if err != nil { + return nil, false, fmt.Errorf("failed to convert symbol %s to function metadata: %w", symbolName, err) + } + funcMap[symbolName] = *m + } + } + + return funcMap, true, nil +} + +func (*NativeBinaryInspector) symbolToFuncMetadata(elfFile *elf.File, sym elf.Symbol) (*bininspect.FunctionMetadata, error) { + manager.SanitizeUprobeAddresses(elfFile, []elf.Symbol{sym}) + offset, err := bininspect.SymbolToOffset(elfFile, sym) + if err != nil { + return nil, err + } + + return &bininspect.FunctionMetadata{EntryLocation: uint64(offset)}, nil +} + +// Cleanup is a no-op for the native inspector +func (*NativeBinaryInspector) Cleanup(_ utils.FilePath) { + // Nothing to do here for the native inspector +} diff --git a/pkg/ebpf/uprobes/inspector_test.go b/pkg/ebpf/uprobes/inspector_test.go new file mode 100644 index 0000000000000..56624eaff8673 --- /dev/null +++ b/pkg/ebpf/uprobes/inspector_test.go @@ -0,0 +1,63 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +//go:build linux_bpf + +package uprobes + +import ( + "fmt" + "path/filepath" + "runtime" + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" + + "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" + "github.com/DataDog/datadog-agent/pkg/network/usm/utils" +) + +func TestNativeBinarySymbolRetrieval(t *testing.T) { + curDir, err := testutil.CurDir() + require.NoError(t, err) + + libmmap := filepath.Join(curDir, "..", "..", "network", "usm", "testdata", "site-packages", "ddtrace") + lib := filepath.Join(libmmap, fmt.Sprintf("libssl.so.%s", runtime.GOARCH)) + fpath := utils.FilePath{HostPath: lib} + + allMandatoryExisting := []SymbolRequest{{Name: "SSL_connect"}} + allBestEffortExisting := []SymbolRequest{{Name: "SSL_connect", BestEffort: true}} + mandatoryExistBestEffortDont := []SymbolRequest{{Name: "SSL_connect"}, {Name: "ThisFunctionDoesNotExistEver", BestEffort: true}} + mandatoryNonExisting := []SymbolRequest{{Name: "ThisFunctionDoesNotExistEver"}} + + inspector := &NativeBinaryInspector{} + + t.Run("MandatoryAllExist", func(tt *testing.T) { + result, compat, err := inspector.Inspect(fpath, allMandatoryExisting) + require.NoError(tt, err) + require.True(tt, compat) + require.ElementsMatch(tt, []string{"SSL_connect"}, maps.Keys(result)) + }) + + t.Run("BestEffortAllExist", func(tt *testing.T) { + result, compat, err := inspector.Inspect(fpath, allBestEffortExisting) + require.NoError(tt, err) + require.True(tt, compat) + require.ElementsMatch(tt, []string{"SSL_connect"}, maps.Keys(result)) + }) + + t.Run("BestEffortDontExist", func(tt *testing.T) { + result, compat, err := inspector.Inspect(fpath, mandatoryExistBestEffortDont) + require.NoError(tt, err) + require.True(tt, compat) + require.ElementsMatch(tt, []string{"SSL_connect"}, maps.Keys(result)) + }) + + t.Run("SomeMandatoryDontExist", func(tt *testing.T) { + _, _, err := inspector.Inspect(fpath, mandatoryNonExisting) + require.Error(tt, err, "should have failed to find mandatory symbols") + }) +} diff --git a/pkg/ebpf/uprobes/procfs.go b/pkg/ebpf/uprobes/procfs.go new file mode 100644 index 0000000000000..ff355e2fe9e1b --- /dev/null +++ b/pkg/ebpf/uprobes/procfs.go @@ -0,0 +1,119 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux + +package uprobes + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strconv" + "sync" + "time" +) + +const procFSUpdateTimeout = 100 * time.Millisecond + +// ProcInfo holds the information extracted from procfs, to avoid repeat calls to the filesystem. +type ProcInfo struct { + procRoot string + PID uint32 + exe string + comm string +} + +// NewProcInfo creates a new ProcInfo object. 
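A hedged usage sketch for ProcInfo, relying on the NewProcInfo constructor and the Exe and Comm accessors defined just below; the procNameOf helper and the hard-coded "/proc" root are illustrative only:

	// procNameOf resolves the executable path and command name of a process.
	// Both accessors retry for up to procFSUpdateTimeout, which tolerates procfs
	// entries that are not fully populated yet for processes that just started.
	func procNameOf(pid uint32) (string, string, error) {
		info := NewProcInfo("/proc", pid) // "/proc" is assumed; callers pass their configured procfs root
		exe, err := info.Exe()
		if err != nil {
			return "", "", err
		}
		comm, err := info.Comm()
		if err != nil {
			return "", "", err
		}
		return exe, comm, nil
	}

Results are cached on the ProcInfo instance, so repeated calls do not touch the filesystem again.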
+func NewProcInfo(procRoot string, pid uint32) *ProcInfo { + return &ProcInfo{ + procRoot: procRoot, + PID: pid, + } +} + +// Avoid allocations, reuse the error to mark "iteration start" in the loop +var errIterStart = errors.New("iteration start") + +func waitUntilSucceeds[T any](p *ProcInfo, procFile string, readFunc func(string) (T, error)) (T, error) { + // Read the exe link + pidAsStr := strconv.FormatUint(uint64(p.PID), 10) + filePath := filepath.Join(p.procRoot, pidAsStr, procFile) + + var result T + err := errIterStart + end := time.Now().Add(procFSUpdateTimeout) + + for err != nil && end.After(time.Now()) { + result, err = readFunc(filePath) + if err != nil { + time.Sleep(10 * time.Millisecond) + } + } + + return result, err +} + +// Exe returns the path to the executable of the process. +func (p *ProcInfo) Exe() (string, error) { + var err error + if p.exe == "" { + p.exe, err = waitUntilSucceeds(p, "exe", os.Readlink) + if err != nil { + return "", err + } + } + + if p.exe == "" { + return "", errors.New("exe link is empty") + } + + return p.exe, nil +} + +const ( + // Defined in https://man7.org/linux/man-pages/man5/proc.5.html. + taskCommLen = 16 +) + +var ( + taskCommLenBufferPool = sync.Pool{ + New: func() any { + buf := make([]byte, taskCommLen) + return &buf + }, + } +) + +func (p *ProcInfo) readComm(commFile string) (string, error) { + file, err := os.Open(commFile) + if err != nil { + return "", err + } + defer file.Close() + + buf := taskCommLenBufferPool.Get().(*[]byte) + defer taskCommLenBufferPool.Put(buf) + n, err := file.Read(*buf) + if err != nil { + // short living process can hit here, or slow start of another process. + return "", nil + } + return string(bytes.TrimSpace((*buf)[:n])), nil +} + +// Comm returns the command name of the process. +func (p *ProcInfo) Comm() (string, error) { + var err error + if p.comm == "" { + p.comm, err = waitUntilSucceeds(p, "comm", p.readComm) + if err != nil { + return "", err + } + } + + return p.comm, nil +} diff --git a/pkg/ebpf/uprobes/testutil.go b/pkg/ebpf/uprobes/testutil.go new file mode 100644 index 0000000000000..f61c45a8adb02 --- /dev/null +++ b/pkg/ebpf/uprobes/testutil.go @@ -0,0 +1,155 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux_bpf + +package uprobes + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strconv" + "testing" + + manager "github.com/DataDog/ebpf-manager" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" + "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" + "github.com/DataDog/datadog-agent/pkg/network/usm/utils" +) + +// === Mocks + +// MockManager is a mock implementation of the manager.Manager interface. +type MockManager struct { + mock.Mock +} + +// AddHook is a mock implementation of the manager.Manager.AddHook method. +func (m *MockManager) AddHook(name string, probe *manager.Probe) error { + args := m.Called(name, probe) + return args.Error(0) +} + +// DetachHook is a mock implementation of the manager.Manager.DetachHook method. +func (m *MockManager) DetachHook(probeID manager.ProbeIdentificationPair) error { + args := m.Called(probeID) + return args.Error(0) +} + +// GetProbe is a mock implementation of the manager.Manager.GetProbe method. 
+func (m *MockManager) GetProbe(probeID manager.ProbeIdentificationPair) (*manager.Probe, bool) { + args := m.Called(probeID) + return args.Get(0).(*manager.Probe), args.Bool(1) +} + +// MockFileRegistry is a mock implementation of the FileRegistry interface. +type MockFileRegistry struct { + mock.Mock +} + +// Register is a mock implementation of the FileRegistry.Register method. +func (m *MockFileRegistry) Register(namespacedPath string, pid uint32, activationCB, deactivationCB, alreadyRegistered utils.Callback) error { + args := m.Called(namespacedPath, pid, activationCB, deactivationCB) + return args.Error(0) +} + +// Unregister is a mock implementation of the FileRegistry.Unregister method. +func (m *MockFileRegistry) Unregister(pid uint32) error { + args := m.Called(pid) + return args.Error(0) +} + +// Clear is a mock implementation of the FileRegistry.Clear method. +func (m *MockFileRegistry) Clear() { + m.Called() +} + +// GetRegisteredProcesses is a mock implementation of the FileRegistry.GetRegisteredProcesses method. +func (m *MockFileRegistry) GetRegisteredProcesses() map[uint32]struct{} { + args := m.Called() + return args.Get(0).(map[uint32]struct{}) +} + +// MockBinaryInspector is a mock implementation of the BinaryInspector interface. +type MockBinaryInspector struct { + mock.Mock +} + +// Inspect is a mock implementation of the BinaryInspector.Inspect method. +func (m *MockBinaryInspector) Inspect(fpath utils.FilePath, requests []SymbolRequest) (map[string]bininspect.FunctionMetadata, bool, error) { + args := m.Called(fpath, requests) + return args.Get(0).(map[string]bininspect.FunctionMetadata), args.Bool(1), args.Error(2) +} + +// Cleanup is a mock implementation of the BinaryInspector.Cleanup method. +func (m *MockBinaryInspector) Cleanup(fpath utils.FilePath) { + _ = m.Called(fpath) +} + +// === Test utils + +// FakeProcFSEntry represents a fake /proc filesystem entry for testing purposes. +type FakeProcFSEntry struct { + Pid uint32 + Cmdline string + Command string + Exe string + Maps string +} + +// CreateFakeProcFS creates a fake /proc filesystem with the given entries, useful for testing attachment to processes. +func CreateFakeProcFS(t *testing.T, entries []FakeProcFSEntry) string { + procRoot := t.TempDir() + + for _, entry := range entries { + baseDir := filepath.Join(procRoot, strconv.Itoa(int(entry.Pid))) + + createFile(t, filepath.Join(baseDir, "cmdline"), entry.Cmdline) + createFile(t, filepath.Join(baseDir, "comm"), entry.Command) + createFile(t, filepath.Join(baseDir, "maps"), entry.Maps) + createSymlink(t, entry.Exe, filepath.Join(baseDir, "exe")) + } + + return procRoot +} + +func createFile(t *testing.T, path, data string) { + if data == "" { + return + } + + dir := filepath.Dir(path) + require.NoError(t, os.MkdirAll(dir, 0775)) + require.NoError(t, os.WriteFile(path, []byte(data), 0775)) +} + +func createSymlink(t *testing.T, target, link string) { + if target == "" { + return + } + + dir := filepath.Dir(link) + require.NoError(t, os.MkdirAll(dir, 0775)) + require.NoError(t, os.Symlink(target, link)) +} + +func getLibSSLPath(t *testing.T) string { + curDir, err := testutil.CurDir() + require.NoError(t, err) + + libmmap := filepath.Join(curDir, "..", "..", "network", "usm", "testdata", "site-packages", "ddtrace") + return filepath.Join(libmmap, fmt.Sprintf("libssl.so.%s", runtime.GOARCH)) +} + +// SetRegistry allows changing the file registry used by the attacher. 
This is useful for testing purposes, to +// replace the registry with a mock object +func (ua *UprobeAttacher) SetRegistry(registry FileRegistry) { + ua.fileRegistry = registry +} diff --git a/pkg/eventmonitor/config/config.go b/pkg/eventmonitor/config/config.go index 65bffc9f13550..785b1ffd968ef 100644 --- a/pkg/eventmonitor/config/config.go +++ b/pkg/eventmonitor/config/config.go @@ -9,7 +9,7 @@ package config import ( "strings" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -34,8 +34,8 @@ type Config struct { func NewConfig() *Config { return &Config{ // event server - SocketPath: coreconfig.SystemProbe().GetString(join(evNS, "socket")), - EventServerBurst: coreconfig.SystemProbe().GetInt(join(evNS, "event_server.burst")), + SocketPath: pkgconfigsetup.SystemProbe().GetString(join(evNS, "socket")), + EventServerBurst: pkgconfigsetup.SystemProbe().GetInt(join(evNS, "event_server.burst")), // consumers ProcessConsumerEnabled: getBool("process.enabled"), @@ -54,9 +54,9 @@ func getAllKeys(key string) (string, string) { func getBool(key string) bool { deprecatedKey, newKey := getAllKeys(key) - if coreconfig.SystemProbe().IsSet(deprecatedKey) { + if pkgconfigsetup.SystemProbe().IsSet(deprecatedKey) { log.Warnf("%s has been deprecated: please set %s instead", deprecatedKey, newKey) - return coreconfig.SystemProbe().GetBool(deprecatedKey) + return pkgconfigsetup.SystemProbe().GetBool(deprecatedKey) } - return coreconfig.SystemProbe().GetBool(newKey) + return pkgconfigsetup.SystemProbe().GetBool(newKey) } diff --git a/pkg/eventmonitor/eventmonitor.go b/pkg/eventmonitor/eventmonitor.go index 1b57d1a9560b0..a0b4a84e2cc9f 100644 --- a/pkg/eventmonitor/eventmonitor.go +++ b/pkg/eventmonitor/eventmonitor.go @@ -21,7 +21,6 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/eventmonitor/config" procstatsd "github.com/DataDog/datadog-agent/pkg/process/statsd" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" @@ -229,7 +228,7 @@ func (m *EventMonitor) GetStats() map[string]interface{} { } // NewEventMonitor instantiates an event monitoring system-probe module -func NewEventMonitor(config *config.Config, secconfig *secconfig.Config, opts Opts, wmeta workloadmeta.Component, telemetry telemetry.Component) (*EventMonitor, error) { +func NewEventMonitor(config *config.Config, secconfig *secconfig.Config, opts Opts, telemetry telemetry.Component) (*EventMonitor, error) { if opts.StatsdClient == nil { opts.StatsdClient = procstatsd.Client } @@ -238,7 +237,7 @@ func NewEventMonitor(config *config.Config, secconfig *secconfig.Config, opts Op opts.ProbeOpts.StatsdClient = opts.StatsdClient } - probe, err := probe.NewProbe(secconfig, opts.ProbeOpts, wmeta, telemetry) + probe, err := probe.NewProbe(secconfig, opts.ProbeOpts, telemetry) if err != nil { return nil, err } diff --git a/pkg/eventmonitor/testutil/testutil.go b/pkg/eventmonitor/testutil/testutil.go index 74bded51f3498..83e1083ad2659 100644 --- a/pkg/eventmonitor/testutil/testutil.go +++ b/pkg/eventmonitor/testutil/testutil.go @@ -15,11 +15,8 @@ import ( "github.com/stretchr/testify/require" sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" - "github.com/DataDog/datadog-agent/comp/core" 
"github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - wmmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" "github.com/DataDog/datadog-agent/pkg/eventmonitor" emconfig "github.com/DataDog/datadog-agent/pkg/eventmonitor/config" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" @@ -43,11 +40,7 @@ func StartEventMonitor(t *testing.T, callback PreStartCallback) { opts := eventmonitor.Opts{} telemetry := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) - wmeta := fxutil.Test[workloadmeta.Component](t, - core.MockBundle(), - wmmock.MockModule(workloadmeta.NewParams()), - ) - evm, err := eventmonitor.NewEventMonitor(emconfig, secconfig, opts, wmeta, telemetry) + evm, err := eventmonitor.NewEventMonitor(emconfig, secconfig, opts, telemetry) require.NoError(t, err) require.NoError(t, evm.Init()) callback(t, evm) diff --git a/pkg/flare/archive.go b/pkg/flare/archive.go index 59024af02be5e..393fa43497798 100644 --- a/pkg/flare/archive.go +++ b/pkg/flare/archive.go @@ -27,7 +27,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/api/security" apiutil "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/diagnose" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/status/health" @@ -45,6 +45,11 @@ var cnfFileExtRx = regexp.MustCompile(`(?i)\.ya?ml`) // searchPaths is a list of path where to look for checks configurations type searchPaths map[string]string +// getProcessAPIAddress is an Alias to GetProcessAPIAddressPort using Datadog config +func getProcessAPIAddressPort() (string, error) { + return pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) +} + // CompleteFlare packages up the files with an already created builder. This is aimed to be used by the flare // component while we migrate to a component architecture. 
func CompleteFlare(fb flaretypes.FlareBuilder, diagnoseDeps diagnose.SuitesDeps) error { @@ -62,51 +67,51 @@ func CompleteFlare(fb flaretypes.FlareBuilder, diagnoseDeps diagnose.SuitesDeps) fb.AddFileFromFunc("tagger-list.json", getAgentTaggerList) //nolint:errcheck fb.AddFileFromFunc("workload-list.log", getAgentWorkloadList) //nolint:errcheck fb.AddFileFromFunc("process-agent_tagger-list.json", getProcessAgentTaggerList) //nolint:errcheck - if !config.Datadog().GetBool("process_config.run_in_core_agent.enabled") { - getChecksFromProcessAgent(fb, config.GetProcessAPIAddressPort) + if !pkgconfigsetup.Datadog().GetBool("process_config.run_in_core_agent.enabled") { + getChecksFromProcessAgent(fb, getProcessAPIAddressPort) } } - fb.RegisterFilePerm(security.GetAuthTokenFilepath(config.Datadog())) + fb.RegisterFilePerm(security.GetAuthTokenFilepath(pkgconfigsetup.Datadog())) - systemProbeConfigBPFDir := config.SystemProbe().GetString("system_probe_config.bpf_dir") + systemProbeConfigBPFDir := pkgconfigsetup.SystemProbe().GetString("system_probe_config.bpf_dir") if systemProbeConfigBPFDir != "" { fb.RegisterDirPerm(systemProbeConfigBPFDir) } addSystemProbePlatformSpecificEntries(fb) - if config.SystemProbe().GetBool("system_probe_config.enabled") { + if pkgconfigsetup.SystemProbe().GetBool("system_probe_config.enabled") { fb.AddFileFromFunc(filepath.Join("expvar", "system-probe"), getSystemProbeStats) //nolint:errcheck } pprofURL := fmt.Sprintf("http://127.0.0.1:%s/debug/pprof/goroutine?debug=2", - config.Datadog().GetString("expvar_port")) - - fb.AddFileFromFunc("process_agent_runtime_config_dump.yaml", getProcessAgentFullConfig) //nolint:errcheck - fb.AddFileFromFunc("runtime_config_dump.yaml", func() ([]byte, error) { return yaml.Marshal(config.Datadog().AllSettings()) }) //nolint:errcheck - fb.AddFileFromFunc("system_probe_runtime_config_dump.yaml", func() ([]byte, error) { return yaml.Marshal(config.SystemProbe().AllSettings()) }) //nolint:errcheck - fb.AddFileFromFunc("diagnose.log", getDiagnoses(fb.IsLocal(), diagnoseDeps)) //nolint:errcheck - fb.AddFileFromFunc("envvars.log", getEnvVars) //nolint:errcheck - fb.AddFileFromFunc("health.yaml", getHealth) //nolint:errcheck - fb.AddFileFromFunc("go-routine-dump.log", func() ([]byte, error) { return getHTTPCallContent(pprofURL) }) //nolint:errcheck - fb.AddFileFromFunc("docker_inspect.log", func() ([]byte, error) { return getDockerSelfInspect(diagnoseDeps.GetWMeta()) }) //nolint:errcheck - fb.AddFileFromFunc("docker_ps.log", getDockerPs) //nolint:errcheck - fb.AddFileFromFunc("k8s/kubelet_config.yaml", getKubeletConfig) //nolint:errcheck - fb.AddFileFromFunc("k8s/kubelet_pods.yaml", getKubeletPods) //nolint:errcheck - fb.AddFileFromFunc("ecs_metadata.json", getECSMeta) //nolint:errcheck + pkgconfigsetup.Datadog().GetString("expvar_port")) + + fb.AddFileFromFunc("process_agent_runtime_config_dump.yaml", getProcessAgentFullConfig) //nolint:errcheck + fb.AddFileFromFunc("runtime_config_dump.yaml", func() ([]byte, error) { return yaml.Marshal(pkgconfigsetup.Datadog().AllSettings()) }) //nolint:errcheck + fb.AddFileFromFunc("system_probe_runtime_config_dump.yaml", func() ([]byte, error) { return yaml.Marshal(pkgconfigsetup.SystemProbe().AllSettings()) }) //nolint:errcheck + fb.AddFileFromFunc("diagnose.log", getDiagnoses(fb.IsLocal(), diagnoseDeps)) //nolint:errcheck + fb.AddFileFromFunc("envvars.log", getEnvVars) //nolint:errcheck + fb.AddFileFromFunc("health.yaml", getHealth) //nolint:errcheck + fb.AddFileFromFunc("go-routine-dump.log", func() 
([]byte, error) { return getHTTPCallContent(pprofURL) }) //nolint:errcheck + fb.AddFileFromFunc("docker_inspect.log", func() ([]byte, error) { return getDockerSelfInspect(diagnoseDeps.GetWMeta()) }) //nolint:errcheck + fb.AddFileFromFunc("docker_ps.log", getDockerPs) //nolint:errcheck + fb.AddFileFromFunc("k8s/kubelet_config.yaml", getKubeletConfig) //nolint:errcheck + fb.AddFileFromFunc("k8s/kubelet_pods.yaml", getKubeletPods) //nolint:errcheck + fb.AddFileFromFunc("ecs_metadata.json", getECSMeta) //nolint:errcheck getRegistryJSON(fb) getVersionHistory(fb) - fb.CopyFile(installinfo.GetFilePath(config.Datadog())) //nolint:errcheck + fb.CopyFile(installinfo.GetFilePath(pkgconfigsetup.Datadog())) //nolint:errcheck getExpVar(fb) //nolint:errcheck getWindowsData(fb) - telemetryURL := fmt.Sprintf("http://127.0.0.1:%s/telemetry", config.Datadog().GetString("expvar_port")) + telemetryURL := fmt.Sprintf("http://127.0.0.1:%s/telemetry", pkgconfigsetup.Datadog().GetString("expvar_port")) fb.AddFileFromFunc("telemetry.log", func() ([]byte, error) { return getHTTPCallContent(telemetryURL) }) //nolint:errcheck - if config.IsRemoteConfigEnabled(config.Datadog()) { + if pkgconfigsetup.IsRemoteConfigEnabled(pkgconfigsetup.Datadog()) { if err := exportRemoteConfig(fb); err != nil { log.Errorf("Could not export remote-config state: %s", err) } @@ -115,11 +120,11 @@ func CompleteFlare(fb flaretypes.FlareBuilder, diagnoseDeps diagnose.SuitesDeps) } func getVersionHistory(fb flaretypes.FlareBuilder) { - fb.CopyFile(filepath.Join(config.Datadog().GetString("run_path"), "version-history.json")) //nolint:errcheck + fb.CopyFile(filepath.Join(pkgconfigsetup.Datadog().GetString("run_path"), "version-history.json")) //nolint:errcheck } func getRegistryJSON(fb flaretypes.FlareBuilder) { - fb.CopyFile(filepath.Join(config.Datadog().GetString("logs_config.run_path"), "registry.json")) //nolint:errcheck + fb.CopyFile(filepath.Join(pkgconfigsetup.Datadog().GetString("logs_config.run_path"), "registry.json")) //nolint:errcheck } func getLogFiles(fb flaretypes.FlareBuilder, logFileDir string) { @@ -156,7 +161,7 @@ func getExpVar(fb flaretypes.FlareBuilder) error { } } - apmDebugPort := config.Datadog().GetInt("apm_config.debug.port") + apmDebugPort := pkgconfigsetup.Datadog().GetInt("apm_config.debug.port") f := filepath.Join("expvar", "trace-agent") resp, err := http.Get(fmt.Sprintf("http://127.0.0.1:%d/debug/vars", apmDebugPort)) if err != nil { @@ -184,7 +189,7 @@ func getExpVar(fb flaretypes.FlareBuilder) error { func getSystemProbeStats() ([]byte, error) { // TODO: (components) - Temporary until we can use the status component to extract the system probe status from it. 
stats := map[string]interface{}{} - systemprobeStatus.GetStatus(stats, config.SystemProbe().GetString("system_probe_config.sysprobe_socket")) + systemprobeStatus.GetStatus(stats, pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")) sysProbeBuf, err := yaml.Marshal(stats["systemProbeStats"]) if err != nil { return nil, err @@ -195,7 +200,7 @@ func getSystemProbeStats() ([]byte, error) { // getProcessAgentFullConfig fetches process-agent runtime config as YAML and returns it to be added to process_agent_runtime_config_dump.yaml func getProcessAgentFullConfig() ([]byte, error) { - addressPort, err := config.GetProcessAPIAddressPort() + addressPort, err := pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) if err != nil { return nil, fmt.Errorf("wrong configuration to connect to process-agent") } @@ -226,8 +231,8 @@ func getConfigFiles(fb flaretypes.FlareBuilder, confSearchPaths map[string]strin }) } - if config.Datadog().ConfigFileUsed() != "" { - mainConfpath := config.Datadog().ConfigFileUsed() + if pkgconfigsetup.Datadog().ConfigFileUsed() != "" { + mainConfpath := pkgconfigsetup.Datadog().ConfigFileUsed() confDir := filepath.Dir(mainConfpath) // zip up the config file that was actually used, if one exists @@ -253,7 +258,7 @@ func getChecksFromProcessAgent(fb flaretypes.FlareBuilder, getAddressPort func() getCheck := func(checkName, setting string) { filename := fmt.Sprintf("%s_check_output.json", checkName) - if !config.Datadog().GetBool(setting) { + if !pkgconfigsetup.Datadog().GetBool(setting) { fb.AddFile(filename, []byte(fmt.Sprintf("'%s' is disabled", setting))) //nolint:errcheck return } @@ -313,18 +318,18 @@ func getDiagnoses(isFlareLocal bool, deps diagnose.SuitesDeps) func() ([]byte, e } func getAgentTaggerList() ([]byte, error) { - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } - taggerListURL := fmt.Sprintf("https://%v:%v/agent/tagger-list", ipcAddress, config.Datadog().GetInt("cmd_port")) + taggerListURL := fmt.Sprintf("https://%v:%v/agent/tagger-list", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")) return getTaggerList(taggerListURL) } func getProcessAgentTaggerList() ([]byte, error) { - addressPort, err := config.GetProcessAPIAddressPort() + addressPort, err := pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) if err != nil { return nil, fmt.Errorf("wrong configuration to connect to process-agent") } @@ -354,12 +359,12 @@ func getTaggerList(remoteURL string) ([]byte, error) { } func getAgentWorkloadList() ([]byte, error) { - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } - return getWorkloadList(fmt.Sprintf("https://%v:%v/agent/workload-list?verbose=true", ipcAddress, config.Datadog().GetInt("cmd_port"))) + return getWorkloadList(fmt.Sprintf("https://%v:%v/agent/workload-list?verbose=true", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port"))) } func getWorkloadList(url string) ([]byte, error) { diff --git a/pkg/flare/archive_dca.go b/pkg/flare/archive_dca.go index 6cf4a37a32d4a..f031efb4a5ac7 100644 --- a/pkg/flare/archive_dca.go +++ b/pkg/flare/archive_dca.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/status" apiv1 "github.com/DataDog/datadog-agent/pkg/clusteragent/api/v1" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/custommetrics" - 
"github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/status/render" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -36,7 +36,7 @@ func CreateDCAArchive(local bool, distPath, logFilePath string, pdata ProfileDat } confSearchPaths := map[string]string{ - "": config.Datadog().GetString("confd_path"), + "": pkgconfigsetup.Datadog().GetString("confd_path"), "dist": filepath.Join(distPath, "conf.d"), } @@ -75,14 +75,14 @@ func createDCAArchive(fb flaretypes.FlareBuilder, confSearchPaths map[string]str fb.AddFileFromFunc("workload-list.log", getDCAWorkloadList) //nolint:errcheck getPerformanceProfileDCA(fb, pdata) - if config.Datadog().GetBool("external_metrics_provider.enabled") { + if pkgconfigsetup.Datadog().GetBool("external_metrics_provider.enabled") { getHPAStatus(fb) //nolint:errcheck } } // QueryDCAMetrics gets the metrics payload exposed by the cluster agent func QueryDCAMetrics() ([]byte, error) { - r, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", config.Datadog().GetInt("metrics_port"))) + r, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", pkgconfigsetup.Datadog().GetInt("metrics_port"))) if err != nil { return nil, err } @@ -168,23 +168,23 @@ func getClusterAgentDiagnose(fb flaretypes.FlareBuilder) error { } func getDCATaggerList() ([]byte, error) { - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } - taggerListURL := fmt.Sprintf("https://%v:%v/tagger-list", ipcAddress, config.Datadog().GetInt("cluster_agent.cmd_port")) + taggerListURL := fmt.Sprintf("https://%v:%v/tagger-list", ipcAddress, pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) return getTaggerList(taggerListURL) } func getDCAWorkloadList() ([]byte, error) { - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } - return getWorkloadList(fmt.Sprintf("https://%v:%v/workload-list?verbose=true", ipcAddress, config.Datadog().GetInt("cluster_agent.cmd_port"))) + return getWorkloadList(fmt.Sprintf("https://%v:%v/workload-list?verbose=true", ipcAddress, pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port"))) } func getPerformanceProfileDCA(fb flaretypes.FlareBuilder, pdata ProfileData) { diff --git a/pkg/flare/archive_linux.go b/pkg/flare/archive_linux.go index 0374d1688bf69..ac03dd5d4ad3d 100644 --- a/pkg/flare/archive_linux.go +++ b/pkg/flare/archive_linux.go @@ -18,11 +18,11 @@ import ( "github.com/DataDog/ebpf-manager/tracefs" flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func addSystemProbePlatformSpecificEntries(fb flaretypes.FlareBuilder) { - sysprobeSocketLocation := config.SystemProbe().GetString("system_probe_config.sysprobe_socket") + sysprobeSocketLocation := pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket") if sysprobeSocketLocation != "" { fb.RegisterDirPerm(filepath.Dir(sysprobeSocketLocation)) } diff --git a/pkg/flare/archive_security.go b/pkg/flare/archive_security.go index bcdb281c20ea8..85545b8c0b139 100644 --- a/pkg/flare/archive_security.go +++ b/pkg/flare/archive_security.go @@ -11,7 +11,7 @@ import ( flarehelpers 
"github.com/DataDog/datadog-agent/comp/core/flare/helpers" flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types" "github.com/DataDog/datadog-agent/comp/core/status" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -61,7 +61,7 @@ func createSecurityAgentArchive(fb flaretypes.FlareBuilder, logFilePath string, } func getComplianceFiles(fb flaretypes.FlareBuilder) error { - compDir := config.Datadog().GetString("compliance_config.dir") + compDir := pkgconfigsetup.Datadog().GetString("compliance_config.dir") return fb.CopyDirTo(compDir, "compliance.d", func(path string) bool { f, err := os.Lstat(path) @@ -73,7 +73,7 @@ func getComplianceFiles(fb flaretypes.FlareBuilder) error { } func getRuntimeFiles(fb flaretypes.FlareBuilder) error { - runtimeDir := config.SystemProbe().GetString("runtime_security_config.policies.dir") + runtimeDir := pkgconfigsetup.SystemProbe().GetString("runtime_security_config.policies.dir") return fb.CopyDirTo(runtimeDir, "runtime-security.d", func(path string) bool { f, err := os.Lstat(path) diff --git a/pkg/flare/archive_test.go b/pkg/flare/archive_test.go index d58d877579119..6e6036d2ddee6 100644 --- a/pkg/flare/archive_test.go +++ b/pkg/flare/archive_test.go @@ -26,7 +26,6 @@ import ( flarehelpers "github.com/DataDog/datadog-agent/comp/core/flare/helpers" "github.com/DataDog/datadog-agent/comp/core/tagger/types" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" model "github.com/DataDog/datadog-agent/pkg/config/model" ) @@ -302,7 +301,7 @@ func TestProcessAgentChecks(t *testing.T) { setupIPCAddress(t, configmock.New(t), srv.URL) mock := flarehelpers.NewFlareBuilderMock(t, false) - getChecksFromProcessAgent(mock.Fb, config.GetProcessAPIAddressPort) + getChecksFromProcessAgent(mock.Fb, getProcessAPIAddressPort) mock.AssertFileContent(string(expectedProcessesJSON), "process_check_output.json") mock.AssertFileContent(string(expectedContainersJSON), "container_check_output.json") diff --git a/pkg/flare/cluster_checks.go b/pkg/flare/cluster_checks.go index 68a004f465389..47108857ab8e2 100644 --- a/pkg/flare/cluster_checks.go +++ b/pkg/flare/cluster_checks.go @@ -17,18 +17,18 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // GetClusterChecks dumps the clustercheck dispatching state to the writer func GetClusterChecks(w io.Writer, checkName string) error { - urlstr := fmt.Sprintf("https://localhost:%v/api/v1/clusterchecks", config.Datadog().GetInt("cluster_agent.cmd_port")) + urlstr := fmt.Sprintf("https://localhost:%v/api/v1/clusterchecks", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) if w != color.Output { color.NoColor = true } - if !config.Datadog().GetBool("cluster_checks.enabled") { + if !pkgconfigsetup.Datadog().GetBool("cluster_checks.enabled") { fmt.Fprintln(w, "Cluster-checks are not enabled") return nil } @@ -36,7 +36,7 @@ func GetClusterChecks(w io.Writer, checkName string) error { c := util.GetClient(false) // FIX: get certificates right then make this true // Set session token - err := 
util.SetAuthToken(config.Datadog()) + err := util.SetAuthToken(pkgconfigsetup.Datadog()) if err != nil { return err } @@ -114,7 +114,7 @@ func GetEndpointsChecks(w io.Writer, checkName string) error { return nil } - urlstr := fmt.Sprintf("https://localhost:%v/api/v1/endpointschecks/configs", config.Datadog().GetInt("cluster_agent.cmd_port")) + urlstr := fmt.Sprintf("https://localhost:%v/api/v1/endpointschecks/configs", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) if w != color.Output { color.NoColor = true @@ -123,7 +123,7 @@ func GetEndpointsChecks(w io.Writer, checkName string) error { c := util.GetClient(false) // FIX: get certificates right then make this true // Set session token - if err := util.SetAuthToken(config.Datadog()); err != nil { + if err := util.SetAuthToken(pkgconfigsetup.Datadog()); err != nil { return err } @@ -153,7 +153,7 @@ func GetEndpointsChecks(w io.Writer, checkName string) error { } func endpointschecksEnabled() bool { - for _, provider := range config.Datadog().GetStringSlice("extra_config_providers") { + for _, provider := range pkgconfigsetup.Datadog().GetStringSlice("extra_config_providers") { if provider == names.KubeEndpointsRegisterName { return true } diff --git a/pkg/flare/config_check.go b/pkg/flare/config_check.go index a575043366852..6d6bfca1970b8 100644 --- a/pkg/flare/config_check.go +++ b/pkg/flare/config_check.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/api/util" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // GetClusterAgentConfigCheck gets config check from the server for cluster agent @@ -24,14 +24,14 @@ func GetClusterAgentConfigCheck(w io.Writer, withDebug bool) error { c := util.GetClient(false) // FIX: get certificates right then make this true // Set session token - err := util.SetAuthToken(config.Datadog()) + err := util.SetAuthToken(pkgconfigsetup.Datadog()) if err != nil { return err } targetURL := url.URL{ Scheme: "https", - Host: fmt.Sprintf("localhost:%v", config.Datadog().GetInt("cluster_agent.cmd_port")), + Host: fmt.Sprintf("localhost:%v", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")), Path: "config-check", } diff --git a/pkg/flare/envvars.go b/pkg/flare/envvars.go index 183e2e2861429..7f53fcbc2ceef 100644 --- a/pkg/flare/envvars.go +++ b/pkg/flare/envvars.go @@ -11,7 +11,7 @@ import ( "os" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) var allowedEnvvarNames = []string{ @@ -138,7 +138,7 @@ var allowedEnvvarNames = []string{ func getAllowedEnvvars() []string { allowed := allowedEnvvarNames - allowed = append(allowed, config.Datadog().GetEnvVars()...) + allowed = append(allowed, pkgconfigsetup.Datadog().GetEnvVars()...) 
var found []string for _, envvar := range os.Environ() { parts := strings.SplitN(envvar, "=", 2) diff --git a/pkg/flare/flare.go b/pkg/flare/flare.go index 0f384f0d9f895..4fa84cdada489 100644 --- a/pkg/flare/flare.go +++ b/pkg/flare/flare.go @@ -7,13 +7,13 @@ package flare import ( "github.com/DataDog/datadog-agent/comp/core/flare/helpers" - "github.com/DataDog/datadog-agent/pkg/config" pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" ) // SendFlare sends a flare and returns the message returned by the backend. This entry point is deprecated in favor of // the 'Send' method of the flare component. func SendFlare(cfg pkgconfigmodel.Reader, archivePath string, caseID string, email string, source helpers.FlareSource) (string, error) { - return helpers.SendTo(cfg, archivePath, caseID, email, config.Datadog().GetString("api_key"), utils.GetInfraEndpoint(config.Datadog()), source) + return helpers.SendTo(cfg, archivePath, caseID, email, pkgconfigsetup.Datadog().GetString("api_key"), utils.GetInfraEndpoint(pkgconfigsetup.Datadog()), source) } diff --git a/pkg/flare/remote_config.go b/pkg/flare/remote_config.go index 17350cbf0ae3e..12094ea947fb9 100644 --- a/pkg/flare/remote_config.go +++ b/pkg/flare/remote_config.go @@ -23,7 +23,7 @@ import ( flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types" "github.com/DataDog/datadog-agent/pkg/api/security" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util" agentgrpc "github.com/DataDog/datadog-agent/pkg/util/grpc" @@ -36,7 +36,7 @@ func exportRemoteConfig(fb flaretypes.FlareBuilder) error { } // Dump the state - token, err := security.FetchAuthToken(config.Datadog()) + token, err := security.FetchAuthToken(pkgconfigsetup.Datadog()) if err != nil { return fmt.Errorf("couldn't get auth token: %v", err) } @@ -47,12 +47,12 @@ func exportRemoteConfig(fb flaretypes.FlareBuilder) error { } ctx = metadata.NewOutgoingContext(ctx, md) - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } - cli, err := agentgrpc.GetDDAgentSecureClient(ctx, ipcAddress, config.GetIPCPort()) + cli, err := agentgrpc.GetDDAgentSecureClient(ctx, ipcAddress, pkgconfigsetup.GetIPCPort()) if err != nil { return err } @@ -64,7 +64,7 @@ func exportRemoteConfig(fb flaretypes.FlareBuilder) error { } var haState *pbgo.GetStateConfigResponse - if config.Datadog().GetBool("multi_region_failover.enabled") { + if pkgconfigsetup.Datadog().GetBool("multi_region_failover.enabled") { if haState, err = cli.GetConfigStateHA(ctx, in); err != nil { return fmt.Errorf("couldn't get the MRF repositories state: %v", err) } @@ -97,7 +97,7 @@ func hashRCTargets(raw []byte) []byte { func getRemoteConfigDB(fb flaretypes.FlareBuilder) error { dstPath, _ := fb.PrepareFilePath("remote-config.db") tempPath, _ := fb.PrepareFilePath("remote-config.temp.db") - srcPath := filepath.Join(config.Datadog().GetString("run_path"), "remote-config.db") + srcPath := filepath.Join(pkgconfigsetup.Datadog().GetString("run_path"), "remote-config.db") // Copies the db so it avoids bbolt from being locked // Also avoid concurrent modifications diff --git a/pkg/fleet/bootstraper/bootstraper.go 
b/pkg/fleet/bootstraper/bootstraper.go index 453b00afd6821..9077d09962b03 100644 --- a/pkg/fleet/bootstraper/bootstraper.go +++ b/pkg/fleet/bootstraper/bootstraper.go @@ -9,40 +9,24 @@ package bootstraper import ( "context" "fmt" - "os" "github.com/DataDog/datadog-agent/pkg/fleet/env" "github.com/DataDog/datadog-agent/pkg/fleet/internal/bootstrap" "github.com/DataDog/datadog-agent/pkg/fleet/internal/exec" "github.com/DataDog/datadog-agent/pkg/fleet/internal/oci" -) - -const ( - installerPackage = "datadog-installer" + "github.com/DataDog/datadog-agent/pkg/fleet/internal/paths" ) // Bootstrap bootstraps the installer and uses it to install the default packages. func Bootstrap(ctx context.Context, env *env.Env) error { version := "latest" - if env.DefaultPackagesVersionOverride[installerPackage] != "" { - version = env.DefaultPackagesVersionOverride[installerPackage] + if env.DefaultPackagesVersionOverride[bootstrap.InstallerPackage] != "" { + version = env.DefaultPackagesVersionOverride[bootstrap.InstallerPackage] } - installerURL := oci.PackageURL(env, installerPackage, version) + installerURL := oci.PackageURL(env, bootstrap.InstallerPackage, version) err := bootstrap.Install(ctx, env, installerURL) if err != nil { return fmt.Errorf("failed to bootstrap the installer: %w", err) } - - cmd := exec.NewInstallerExec(env, exec.StableInstallerPath) - defaultPackages, err := cmd.DefaultPackages(ctx) - if err != nil { - return fmt.Errorf("failed to get default packages: %w", err) - } - for _, url := range defaultPackages { - err = cmd.Install(ctx, url, nil) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to install package %s: %v\n", url, err) - } - } - return nil + return exec.NewInstallerExec(env, paths.StableInstallerPath).Setup(ctx) } diff --git a/pkg/fleet/daemon/daemon.go b/pkg/fleet/daemon/daemon.go index beeb8aafdfa60..bb61722f55ff1 100644 --- a/pkg/fleet/daemon/daemon.go +++ b/pkg/fleet/daemon/daemon.go @@ -40,7 +40,7 @@ const ( // gcInterval is the interval at which the GC will run gcInterval = 1 * time.Hour // refreshStateInterval is the interval at which the state will be refreshed - refreshStateInterval = 5 * time.Minute + refreshStateInterval = 1 * time.Minute ) // Daemon is the fleet daemon in charge of remote install, updates and configuration. 
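The hunks that follow add config-experiment support to the Daemon interface and its implementation. As a hedged sketch of the intended lifecycle, here the runConfigExperiment driver and the healthy check are hypothetical; only the three methods follow the interface additions below:

	// runConfigExperiment starts a config experiment for a package, then either
	// promotes it to stable or stops it to roll back, mirroring the remote
	// start/promote/stop requests handled by the daemon.
	func runConfigExperiment(ctx context.Context, d Daemon, pkg, hash string, healthy func() bool) error {
		if err := d.StartConfigExperiment(ctx, pkg, hash); err != nil {
			return err
		}
		if healthy() {
			return d.PromoteConfigExperiment(ctx, pkg)
		}
		return d.StopConfigExperiment(ctx, pkg)
	}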
@@ -53,6 +53,9 @@ type Daemon interface { StartExperiment(ctx context.Context, url string) error StopExperiment(ctx context.Context, pkg string) error PromoteExperiment(ctx context.Context, pkg string) error + StartConfigExperiment(ctx context.Context, pkg string, hash string) error + StopConfigExperiment(ctx context.Context, pkg string) error + PromoteConfigExperiment(ctx context.Context, pkg string) error GetPackage(pkg string, version string) (Package, error) GetState() (map[string]repository.State, error) @@ -67,7 +70,7 @@ type daemonImpl struct { env *env.Env installer installer.Installer rc *remoteConfig - cdn *cdn.CDN + cdn cdn.CDN catalog catalog requests chan remoteAPIRequest requestsWG sync.WaitGroup @@ -94,15 +97,19 @@ func NewDaemon(rcFetcher client.ConfigFetcher, config config.Reader) (Daemon, er } env := env.FromConfig(config) installer := newInstaller(env, installerBin) - return newDaemon(rc, installer, env), nil + cdn, err := cdn.New(env, "opt/datadog-packages/run/rc/daemon") + if err != nil { + return nil, err + } + return newDaemon(rc, installer, env, cdn), nil } -func newDaemon(rc *remoteConfig, installer installer.Installer, env *env.Env) *daemonImpl { +func newDaemon(rc *remoteConfig, installer installer.Installer, env *env.Env, cdn cdn.CDN) *daemonImpl { i := &daemonImpl{ env: env, rc: rc, installer: installer, - cdn: cdn.New(env), + cdn: cdn, requests: make(chan remoteAPIRequest, 32), catalog: catalog{}, stopChan: make(chan struct{}), @@ -233,6 +240,7 @@ func (d *daemonImpl) Stop(_ context.Context) error { defer d.m.Unlock() d.rc.Close() close(d.stopChan) + d.cdn.Close() d.requestsWG.Wait() return nil } @@ -344,6 +352,72 @@ func (d *daemonImpl) stopExperiment(ctx context.Context, pkg string) (err error) return nil } +// StartConfigExperiment starts a config experiment with the given package. +func (d *daemonImpl) StartConfigExperiment(ctx context.Context, url string, version string) error { + d.m.Lock() + defer d.m.Unlock() + return d.startConfigExperiment(ctx, url, version) +} + +func (d *daemonImpl) startConfigExperiment(ctx context.Context, url string, version string) (err error) { + span, ctx := tracer.StartSpanFromContext(ctx, "start_config_experiment") + defer func() { span.Finish(tracer.WithError(err)) }() + d.refreshState(ctx) + defer d.refreshState(ctx) + + log.Infof("Daemon: Starting config experiment for package from %s", url) + err = d.installer.InstallConfigExperiment(ctx, url, version) + if err != nil { + return fmt.Errorf("could not start config experiment: %w", err) + } + log.Infof("Daemon: Successfully started config experiment for package from %s", url) + return nil +} + +// PromoteConfigExperiment promotes the experiment to stable. +func (d *daemonImpl) PromoteConfigExperiment(ctx context.Context, pkg string) error { + d.m.Lock() + defer d.m.Unlock() + return d.promoteConfigExperiment(ctx, pkg) +} + +func (d *daemonImpl) promoteConfigExperiment(ctx context.Context, pkg string) (err error) { + span, ctx := tracer.StartSpanFromContext(ctx, "promote_config_experiment") + defer func() { span.Finish(tracer.WithError(err)) }() + d.refreshState(ctx) + defer d.refreshState(ctx) + + log.Infof("Daemon: Promoting config experiment for package %s", pkg) + err = d.installer.PromoteConfigExperiment(ctx, pkg) + if err != nil { + return fmt.Errorf("could not promote config experiment: %w", err) + } + log.Infof("Daemon: Successfully promoted config experiment for package %s", pkg) + return nil +} + +// StopConfigExperiment stops the experiment. 
+func (d *daemonImpl) StopConfigExperiment(ctx context.Context, pkg string) error { + d.m.Lock() + defer d.m.Unlock() + return d.stopConfigExperiment(ctx, pkg) +} + +func (d *daemonImpl) stopConfigExperiment(ctx context.Context, pkg string) (err error) { + span, ctx := tracer.StartSpanFromContext(ctx, "stop_config_experiment") + defer func() { span.Finish(tracer.WithError(err)) }() + d.refreshState(ctx) + defer d.refreshState(ctx) + + log.Infof("Daemon: Stopping config experiment for package %s", pkg) + err = d.installer.RemoveConfigExperiment(ctx, pkg) + if err != nil { + return fmt.Errorf("could not stop config experiment: %w", err) + } + log.Infof("Daemon: Successfully stopped config experiment for package %s", pkg) + return nil +} + func (d *daemonImpl) handleCatalogUpdate(c catalog) error { d.m.Lock() defer d.m.Unlock() @@ -371,8 +445,18 @@ func (d *daemonImpl) handleRemoteAPIRequest(request remoteAPIRequest) (err error if err != nil { return fmt.Errorf("could not get installer state: %w", err) } + + c, err := d.installer.ConfigState(request.Package) + if err != nil { + return fmt.Errorf("could not get installer config state: %w", err) + } + versionEqual := request.ExpectedState.InstallerVersion == "" || version.AgentVersion == request.ExpectedState.InstallerVersion - if versionEqual && s.Stable != request.ExpectedState.Stable || s.Experiment != request.ExpectedState.Experiment { + if versionEqual && + (s.Stable != request.ExpectedState.Stable || + s.Experiment != request.ExpectedState.Experiment || + c.Stable != request.ExpectedState.StableConfig || + c.Experiment != request.ExpectedState.ExperimentConfig) { log.Infof("remote request %s not executed as state does not match: expected %v, got %v", request.ID, request.ExpectedState, s) setRequestInvalid(ctx) d.refreshState(ctx) @@ -403,6 +487,22 @@ func (d *daemonImpl) handleRemoteAPIRequest(request remoteAPIRequest) (err error case methodPromoteExperiment: log.Infof("Installer: Received remote request %s to promote experiment for package %s", request.ID, request.Package) return d.promoteExperiment(ctx, request.Package) + + case methodStartConfigExperiment: + var params taskWithVersionParams + err = json.Unmarshal(request.Params, ¶ms) + if err != nil { + return fmt.Errorf("could not unmarshal start experiment params: %w", err) + } + log.Infof("Installer: Received remote request %s to start config experiment for package %s", request.ID, request.Package) + return d.startConfigExperiment(ctx, request.Package, params.Version) + case methodStopConfigExperiment: + log.Infof("Installer: Received remote request %s to stop config experiment for package %s", request.ID, request.Package) + return d.stopConfigExperiment(ctx, request.Package) + case methodPromoteConfigExperiment: + log.Infof("Installer: Received remote request %s to promote config experiment for package %s", request.ID, request.Package) + return d.promoteConfigExperiment(ctx, request.Package) + default: return fmt.Errorf("unknown method: %s", request.Method) } diff --git a/pkg/fleet/daemon/daemon_test.go b/pkg/fleet/daemon/daemon_test.go index 4e3d0941553ee..9ef255c84d3f6 100644 --- a/pkg/fleet/daemon/daemon_test.go +++ b/pkg/fleet/daemon/daemon_test.go @@ -20,9 +20,9 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config/remote/client" "github.com/DataDog/datadog-agent/pkg/fleet/env" "github.com/DataDog/datadog-agent/pkg/fleet/installer/repository" + 
"github.com/DataDog/datadog-agent/pkg/fleet/internal/cdn" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/version" @@ -86,6 +86,21 @@ func (m *testPackageManager) PromoteExperiment(ctx context.Context, pkg string) return args.Error(0) } +func (m *testPackageManager) InstallConfigExperiment(ctx context.Context, url string, hash string) error { + args := m.Called(ctx, url, hash) + return args.Error(0) +} + +func (m *testPackageManager) RemoveConfigExperiment(ctx context.Context, pkg string) error { + args := m.Called(ctx, pkg) + return args.Error(0) +} + +func (m *testPackageManager) PromoteConfigExperiment(ctx context.Context, pkg string) error { + args := m.Called(ctx, pkg) + return args.Error(0) +} + func (m *testPackageManager) GarbageCollect(ctx context.Context) error { args := m.Called(ctx) return args.Error(0) @@ -101,16 +116,21 @@ func (m *testPackageManager) UninstrumentAPMInjector(ctx context.Context, method return args.Error(0) } +func (m *testPackageManager) Close() error { + args := m.Called() + return args.Error(0) +} + type testRemoteConfigClient struct { sync.Mutex t *testing.T - listeners map[string][]client.Handler + listeners map[string][]func(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus)) } func newTestRemoteConfigClient(t *testing.T) *testRemoteConfigClient { return &testRemoteConfigClient{ t: t, - listeners: make(map[string][]client.Handler), + listeners: make(map[string][]func(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus))), } } @@ -123,7 +143,7 @@ func (c *testRemoteConfigClient) Close() { func (c *testRemoteConfigClient) Subscribe(product string, fn func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus))) { c.Lock() defer c.Unlock() - c.listeners[product] = append(c.listeners[product], client.Handler(fn)) + c.listeners[product] = append(c.listeners[product], fn) } func (c *testRemoteConfigClient) SetInstallerState(_ []*pbgo.PackageState) { @@ -187,8 +207,13 @@ func newTestInstaller(t *testing.T) *testInstaller { pm.On("ConfigStates").Return(map[string]repository.State{}, nil) rcc := newTestRemoteConfigClient(t) rc := &remoteConfig{client: rcc} + env := &env.Env{RemoteUpdates: true} + cdn, err := cdn.New(env, t.TempDir()) + require.NoError(t, err) + daemon := newDaemon(rc, pm, env, cdn) + require.NoError(t, err) i := &testInstaller{ - daemonImpl: newDaemon(rc, pm, &env.Env{RemoteUpdates: true}), + daemonImpl: daemon, rcc: rcc, pm: pm, } @@ -311,10 +336,11 @@ func TestRemoteRequest(t *testing.T) { ID: "test-request-1", Method: methodStartExperiment, Package: testExperimentPackage.Name, - ExpectedState: expectedState{InstallerVersion: version.AgentVersion, Stable: testStablePackage.Version}, + ExpectedState: expectedState{InstallerVersion: version.AgentVersion, Stable: testStablePackage.Version, StableConfig: testStablePackage.Version}, Params: versionParamsJSON, } i.pm.On("State", testStablePackage.Name).Return(repository.State{Stable: testStablePackage.Version}, nil).Once() + i.pm.On("ConfigState", testStablePackage.Name).Return(repository.State{Stable: testStablePackage.Version}, nil).Once() i.pm.On("InstallExperiment", mock.Anything, testExperimentPackage.URL).Return(nil).Once() i.rcc.SubmitRequest(testRequest) i.requestsWG.Wait() @@ -323,9 +349,10 @@ func TestRemoteRequest(t *testing.T) { ID: "test-request-2", Method: methodStopExperiment, Package: 
testExperimentPackage.Name, - ExpectedState: expectedState{InstallerVersion: version.AgentVersion, Stable: testStablePackage.Version, Experiment: testExperimentPackage.Version}, + ExpectedState: expectedState{InstallerVersion: version.AgentVersion, Stable: testStablePackage.Version, Experiment: testExperimentPackage.Version, StableConfig: testStablePackage.Version}, } i.pm.On("State", testStablePackage.Name).Return(repository.State{Stable: testStablePackage.Version, Experiment: testExperimentPackage.Version}, nil).Once() + i.pm.On("ConfigState", testStablePackage.Name).Return(repository.State{Stable: testStablePackage.Version}, nil).Once() i.pm.On("RemoveExperiment", mock.Anything, testExperimentPackage.Name).Return(nil).Once() i.rcc.SubmitRequest(testRequest) i.requestsWG.Wait() @@ -334,9 +361,10 @@ func TestRemoteRequest(t *testing.T) { ID: "test-request-3", Method: methodPromoteExperiment, Package: testExperimentPackage.Name, - ExpectedState: expectedState{InstallerVersion: version.AgentVersion, Stable: testStablePackage.Version, Experiment: testExperimentPackage.Version}, + ExpectedState: expectedState{InstallerVersion: version.AgentVersion, Stable: testStablePackage.Version, Experiment: testExperimentPackage.Version, StableConfig: testStablePackage.Version}, } i.pm.On("State", testStablePackage.Name).Return(repository.State{Stable: testStablePackage.Version, Experiment: testExperimentPackage.Version}, nil).Once() + i.pm.On("ConfigState", testStablePackage.Name).Return(repository.State{Stable: testStablePackage.Version}, nil).Once() i.pm.On("PromoteExperiment", mock.Anything, testExperimentPackage.Name).Return(nil).Once() i.rcc.SubmitRequest(testRequest) i.requestsWG.Wait() diff --git a/pkg/fleet/daemon/local_api.go b/pkg/fleet/daemon/local_api.go index 5f4b58bcb9ea4..290829541be3a 100644 --- a/pkg/fleet/daemon/local_api.go +++ b/pkg/fleet/daemon/local_api.go @@ -110,6 +110,9 @@ func (l *localAPIImpl) handler() http.Handler { r.HandleFunc("/{package}/experiment/start", l.startExperiment).Methods(http.MethodPost) r.HandleFunc("/{package}/experiment/stop", l.stopExperiment).Methods(http.MethodPost) r.HandleFunc("/{package}/experiment/promote", l.promoteExperiment).Methods(http.MethodPost) + r.HandleFunc("/{package}/config_experiment/start", l.startConfigExperiment).Methods(http.MethodPost) + r.HandleFunc("/{package}/config_experiment/stop", l.stopConfigExperiment).Methods(http.MethodPost) + r.HandleFunc("/{package}/config_experiment/promote", l.promoteConfigExperiment).Methods(http.MethodPost) r.HandleFunc("/{package}/install", l.install).Methods(http.MethodPost) return r } @@ -221,6 +224,63 @@ func (l *localAPIImpl) promoteExperiment(w http.ResponseWriter, r *http.Request) } } +// example: curl -X POST --unix-socket /opt/datadog-packages/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/config_experiment/start -d '{"version":"1.21.5"}' +func (l *localAPIImpl) startConfigExperiment(w http.ResponseWriter, r *http.Request) { + pkg := mux.Vars(r)["package"] + w.Header().Set("Content-Type", "application/json") + var request taskWithVersionParams + var response APIResponse + defer func() { + _ = json.NewEncoder(w).Encode(response) + }() + err := json.NewDecoder(r.Body).Decode(&request) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + response.Error = &APIError{Message: err.Error()} + return + } + err = l.daemon.StartConfigExperiment(r.Context(), pkg, request.Version) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + response.Error = 
&APIError{Message: err.Error()} + return + } +} + +// example: curl -X POST --unix-socket /opt/datadog-packages/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/config_experiment/stop -d '{}' +func (l *localAPIImpl) stopConfigExperiment(w http.ResponseWriter, r *http.Request) { + pkg := mux.Vars(r)["package"] + w.Header().Set("Content-Type", "application/json") + var response APIResponse + defer func() { + _ = json.NewEncoder(w).Encode(response) + }() + log.Infof("Received local request to stop experiment for package %s", pkg) + err := l.daemon.StopConfigExperiment(r.Context(), pkg) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + response.Error = &APIError{Message: err.Error()} + return + } +} + +// example: curl -X POST --unix-socket /opt/datadog-packages/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/config_experiment/promote -d '{}' +func (l *localAPIImpl) promoteConfigExperiment(w http.ResponseWriter, r *http.Request) { + pkg := mux.Vars(r)["package"] + w.Header().Set("Content-Type", "application/json") + var response APIResponse + defer func() { + _ = json.NewEncoder(w).Encode(response) + }() + log.Infof("Received local request to promote experiment for package %s", pkg) + err := l.daemon.PromoteConfigExperiment(r.Context(), pkg) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + response.Error = &APIError{Message: err.Error()} + return + } +} + // example: curl -X POST --unix-socket /opt/datadog-packages/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/install -d '{"version":"1.21.5"}' func (l *localAPIImpl) install(w http.ResponseWriter, r *http.Request) { pkg := mux.Vars(r)["package"] @@ -265,6 +325,9 @@ type LocalAPIClient interface { StartExperiment(pkg, version string) error StopExperiment(pkg string) error PromoteExperiment(pkg string) error + StartConfigExperiment(pkg, version string) error + StopConfigExperiment(pkg string) error + PromoteConfigExperiment(pkg string) error } // LocalAPIClient is a client to interact with the locally exposed daemon API. @@ -414,6 +477,85 @@ func (c *localAPIClientImpl) PromoteExperiment(pkg string) error { return nil } +// StartConfigExperiment starts a config experiment for a package. +func (c *localAPIClientImpl) StartConfigExperiment(pkg, version string) error { + params := taskWithVersionParams{ + Version: version, + } + body, err := json.Marshal(params) + if err != nil { + return err + } + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://%s/%s/config_experiment/start", c.addr, pkg), bytes.NewBuffer(body)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + resp, err := c.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + var response APIResponse + err = json.NewDecoder(resp.Body).Decode(&response) + if err != nil { + return err + } + if response.Error != nil { + return fmt.Errorf("error starting config experiment: %s", response.Error.Message) + } + return nil +} + +// StopConfigExperiment stops an experiment for a package. 
+func (c *localAPIClientImpl) StopConfigExperiment(pkg string) error { + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://%s/%s/config_experiment/stop", c.addr, pkg), nil) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + resp, err := c.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + var response APIResponse + err = json.NewDecoder(resp.Body).Decode(&response) + if err != nil { + return err + } + if response.Error != nil { + return fmt.Errorf("error stopping config experiment: %s", response.Error.Message) + } + return nil +} + +// PromoteConfigExperiment promotes an experiment for a package. +func (c *localAPIClientImpl) PromoteConfigExperiment(pkg string) error { + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://%s/%s/config_experiment/promote", c.addr, pkg), nil) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + resp, err := c.client.Do(req) + if err != nil { + return err + } + var response APIResponse + err = json.NewDecoder(resp.Body).Decode(&response) + if err != nil { + return err + } + if response.Error != nil { + return fmt.Errorf("error promoting config experiment: %s", response.Error.Message) + } + defer resp.Body.Close() + return nil +} + // Install installs a package with a specific version. func (c *localAPIClientImpl) Install(pkg, version string) error { params := taskWithVersionParams{ diff --git a/pkg/fleet/daemon/local_api_test.go b/pkg/fleet/daemon/local_api_test.go index 6017884168f43..23ceefa153793 100644 --- a/pkg/fleet/daemon/local_api_test.go +++ b/pkg/fleet/daemon/local_api_test.go @@ -56,6 +56,21 @@ func (m *testDaemon) PromoteExperiment(ctx context.Context, pkg string) error { return args.Error(0) } +func (m *testDaemon) StartConfigExperiment(ctx context.Context, url string, hash string) error { + args := m.Called(ctx, url, hash) + return args.Error(0) +} + +func (m *testDaemon) StopConfigExperiment(ctx context.Context, pkg string) error { + args := m.Called(ctx, pkg) + return args.Error(0) +} + +func (m *testDaemon) PromoteConfigExperiment(ctx context.Context, pkg string) error { + args := m.Called(ctx, pkg) + return args.Error(0) +} + func (m *testDaemon) GetPackage(pkg string, version string) (Package, error) { args := m.Called(pkg, version) return args.Get(0).(Package), args.Error(1) diff --git a/pkg/fleet/daemon/remote_config.go b/pkg/fleet/daemon/remote_config.go index 9509185899ccd..bd479ca737cd5 100644 --- a/pkg/fleet/daemon/remote_config.go +++ b/pkg/fleet/daemon/remote_config.go @@ -100,7 +100,7 @@ func (c *catalog) getPackage(pkg string, version string, arch string, platform s type handleCatalogUpdate func(catalog catalog) error -func handleUpdaterCatalogDDUpdate(h handleCatalogUpdate, firstCatalogApplied func()) client.Handler { +func handleUpdaterCatalogDDUpdate(h handleCatalogUpdate, firstCatalogApplied func()) func(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus)) { var catalogOnce sync.Once return func(catalogConfigs map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)) { var mergedCatalog catalog @@ -167,6 +167,10 @@ const ( methodStartExperiment = "start_experiment" methodStopExperiment = "stop_experiment" methodPromoteExperiment = "promote_experiment" + + methodStartConfigExperiment = "start_experiment_config" + methodStopConfigExperiment = 
"stop_experiment_config" + methodPromoteConfigExperiment = "promote_experiment_config" ) type remoteAPIRequest struct { @@ -183,6 +187,8 @@ type expectedState struct { InstallerVersion string `json:"installer_version"` Stable string `json:"stable"` Experiment string `json:"experiment"` + StableConfig string `json:"stable_config"` + ExperimentConfig string `json:"experiment_config"` } type taskWithVersionParams struct { @@ -192,7 +198,7 @@ type taskWithVersionParams struct { type handleRemoteAPIRequest func(request remoteAPIRequest) error -func handleUpdaterTaskUpdate(h handleRemoteAPIRequest) client.Handler { +func handleUpdaterTaskUpdate(h handleRemoteAPIRequest) func(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus)) { var executedRequests = make(map[string]struct{}) return func(requestConfigs map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)) { requests := map[string]remoteAPIRequest{} diff --git a/pkg/fleet/env/env.go b/pkg/fleet/env/env.go index 780382c143c30..18cf17ea6ed06 100644 --- a/pkg/fleet/env/env.go +++ b/pkg/fleet/env/env.go @@ -12,7 +12,7 @@ import ( "slices" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/config/utils" ) @@ -23,12 +23,15 @@ const ( envRemotePolicies = "DD_REMOTE_POLICIES" envRegistryURL = "DD_INSTALLER_REGISTRY_URL" envRegistryAuth = "DD_INSTALLER_REGISTRY_AUTH" + envRegistryUsername = "DD_INSTALLER_REGISTRY_USERNAME" + envRegistryPassword = "DD_INSTALLER_REGISTRY_PASSWORD" envDefaultPackageVersion = "DD_INSTALLER_DEFAULT_PKG_VERSION" envDefaultPackageInstall = "DD_INSTALLER_DEFAULT_PKG_INSTALL" envApmLibraries = "DD_APM_INSTRUMENTATION_LIBRARIES" envAgentMajorVersion = "DD_AGENT_MAJOR_VERSION" envAgentMinorVersion = "DD_AGENT_MINOR_VERSION" envApmLanguages = "DD_APM_INSTRUMENTATION_LANGUAGES" + envCDNLocalDirPath = "DD_INSTALLER_DEBUG_CDN_LOCAL_DIR_PATH" ) var defaultEnv = Env{ @@ -38,8 +41,12 @@ var defaultEnv = Env{ RegistryOverride: "", RegistryAuthOverride: "", + RegistryUsername: "", + RegistryPassword: "", RegistryOverrideByImage: map[string]string{}, RegistryAuthOverrideByImage: map[string]string{}, + RegistryUsernameByImage: map[string]string{}, + RegistryPasswordByImage: map[string]string{}, DefaultPackagesInstallOverride: map[string]bool{}, DefaultPackagesVersionOverride: map[string]string{}, @@ -64,8 +71,12 @@ type Env struct { RegistryOverride string RegistryAuthOverride string + RegistryUsername string + RegistryPassword string RegistryOverrideByImage map[string]string RegistryAuthOverrideByImage map[string]string + RegistryUsernameByImage map[string]string + RegistryPasswordByImage map[string]string DefaultPackagesInstallOverride map[string]bool DefaultPackagesVersionOverride map[string]string @@ -76,6 +87,8 @@ type Env struct { AgentMinorVersion string InstallScript InstallScriptEnv + + CDNLocalDirPath string } // FromEnv returns an Env struct with values from the environment. 
@@ -88,8 +101,12 @@ func FromEnv() *Env { RegistryOverride: getEnvOrDefault(envRegistryURL, defaultEnv.RegistryOverride), RegistryAuthOverride: getEnvOrDefault(envRegistryAuth, defaultEnv.RegistryAuthOverride), + RegistryUsername: getEnvOrDefault(envRegistryUsername, defaultEnv.RegistryUsername), + RegistryPassword: getEnvOrDefault(envRegistryPassword, defaultEnv.RegistryPassword), RegistryOverrideByImage: overridesByNameFromEnv(envRegistryURL, func(s string) string { return s }), RegistryAuthOverrideByImage: overridesByNameFromEnv(envRegistryAuth, func(s string) string { return s }), + RegistryUsernameByImage: overridesByNameFromEnv(envRegistryUsername, func(s string) string { return s }), + RegistryPasswordByImage: overridesByNameFromEnv(envRegistryPassword, func(s string) string { return s }), DefaultPackagesInstallOverride: overridesByNameFromEnv(envDefaultPackageInstall, func(s string) bool { return strings.ToLower(s) == "true" }), DefaultPackagesVersionOverride: overridesByNameFromEnv(envDefaultPackageVersion, func(s string) string { return s }), @@ -100,11 +117,13 @@ func FromEnv() *Env { AgentMinorVersion: os.Getenv(envAgentMinorVersion), InstallScript: installScriptEnvFromEnv(), + + CDNLocalDirPath: getEnvOrDefault(envCDNLocalDirPath, ""), } } // FromConfig returns an Env struct with values from the configuration. -func FromConfig(config config.Reader) *Env { +func FromConfig(config model.Reader) *Env { return &Env{ APIKey: utils.SanitizeAPIKey(config.GetString("api_key")), Site: config.GetString("site"), @@ -112,6 +131,8 @@ func FromConfig(config config.Reader) *Env { RemotePolicies: config.GetBool("remote_policies"), RegistryOverride: config.GetString("installer.registry.url"), RegistryAuthOverride: config.GetString("installer.registry.auth"), + RegistryUsername: config.GetString("installer.registry.username"), + RegistryPassword: config.GetString("installer.registry.password"), } } @@ -136,6 +157,12 @@ func (e *Env) ToEnv() []string { if e.RegistryAuthOverride != "" { env = append(env, envRegistryAuth+"="+e.RegistryAuthOverride) } + if e.RegistryUsername != "" { + env = append(env, envRegistryUsername+"="+e.RegistryUsername) + } + if e.RegistryPassword != "" { + env = append(env, envRegistryPassword+"="+e.RegistryPassword) + } if len(e.ApmLibraries) > 0 { libraries := []string{} for l, v := range e.ApmLibraries { @@ -150,6 +177,8 @@ func (e *Env) ToEnv() []string { } env = append(env, overridesByNameToEnv(envRegistryURL, e.RegistryOverrideByImage)...) env = append(env, overridesByNameToEnv(envRegistryAuth, e.RegistryAuthOverrideByImage)...) + env = append(env, overridesByNameToEnv(envRegistryUsername, e.RegistryUsernameByImage)...) + env = append(env, overridesByNameToEnv(envRegistryPassword, e.RegistryPasswordByImage)...) env = append(env, overridesByNameToEnv(envDefaultPackageInstall, e.DefaultPackagesInstallOverride)...) env = append(env, overridesByNameToEnv(envDefaultPackageVersion, e.DefaultPackagesVersionOverride)...) 
return env diff --git a/pkg/fleet/env/env_test.go b/pkg/fleet/env/env_test.go index b4e86c46e3f93..9c42a96f99983 100644 --- a/pkg/fleet/env/env_test.go +++ b/pkg/fleet/env/env_test.go @@ -25,8 +25,12 @@ func TestFromEnv(t *testing.T) { Site: "datadoghq.com", RegistryOverride: "", RegistryAuthOverride: "", + RegistryUsername: "", + RegistryPassword: "", RegistryOverrideByImage: map[string]string{}, RegistryAuthOverrideByImage: map[string]string{}, + RegistryUsernameByImage: map[string]string{}, + RegistryPasswordByImage: map[string]string{}, DefaultPackagesInstallOverride: map[string]bool{}, DefaultPackagesVersionOverride: map[string]string{}, ApmLibraries: map[ApmLibLanguage]ApmLibVersion{}, @@ -44,10 +48,16 @@ func TestFromEnv(t *testing.T) { envRemotePolicies: "true", envRegistryURL: "registry.example.com", envRegistryAuth: "auth", + envRegistryUsername: "username", + envRegistryPassword: "password", envRegistryURL + "_IMAGE": "another.registry.example.com", envRegistryURL + "_ANOTHER_IMAGE": "yet.another.registry.example.com", envRegistryAuth + "_IMAGE": "another.auth", envRegistryAuth + "_ANOTHER_IMAGE": "yet.another.auth", + envRegistryUsername + "_IMAGE": "another.username", + envRegistryUsername + "_ANOTHER_IMAGE": "yet.another.username", + envRegistryPassword + "_IMAGE": "another.password", + envRegistryPassword + "_ANOTHER_IMAGE": "yet.another.password", envDefaultPackageInstall + "_PACKAGE": "true", envDefaultPackageInstall + "_ANOTHER_PACKAGE": "false", envDefaultPackageVersion + "_PACKAGE": "1.2.3", @@ -62,6 +72,8 @@ func TestFromEnv(t *testing.T) { RemotePolicies: true, RegistryOverride: "registry.example.com", RegistryAuthOverride: "auth", + RegistryUsername: "username", + RegistryPassword: "password", RegistryOverrideByImage: map[string]string{ "image": "another.registry.example.com", "another-image": "yet.another.registry.example.com", @@ -70,6 +82,14 @@ func TestFromEnv(t *testing.T) { "image": "another.auth", "another-image": "yet.another.auth", }, + RegistryUsernameByImage: map[string]string{ + "image": "another.username", + "another-image": "yet.another.username", + }, + RegistryPasswordByImage: map[string]string{ + "image": "another.password", + "another-image": "yet.another.password", + }, DefaultPackagesInstallOverride: map[string]bool{ "package": true, "another-package": false, @@ -100,6 +120,8 @@ func TestFromEnv(t *testing.T) { RegistryAuthOverride: "", RegistryOverrideByImage: map[string]string{}, RegistryAuthOverrideByImage: map[string]string{}, + RegistryUsernameByImage: map[string]string{}, + RegistryPasswordByImage: map[string]string{}, DefaultPackagesInstallOverride: map[string]bool{}, DefaultPackagesVersionOverride: map[string]string{}, ApmLibraries: map[ApmLibLanguage]ApmLibVersion{ @@ -133,6 +155,8 @@ func TestFromEnv(t *testing.T) { }, RegistryOverrideByImage: map[string]string{}, RegistryAuthOverrideByImage: map[string]string{}, + RegistryUsernameByImage: map[string]string{}, + RegistryPasswordByImage: map[string]string{}, DefaultPackagesInstallOverride: map[string]bool{}, DefaultPackagesVersionOverride: map[string]string{}, }, @@ -171,6 +195,8 @@ func TestToEnv(t *testing.T) { RemotePolicies: true, RegistryOverride: "registry.example.com", RegistryAuthOverride: "auth", + RegistryUsername: "username", + RegistryPassword: "password", RegistryOverrideByImage: map[string]string{ "image": "another.registry.example.com", "another-image": "yet.another.registry.example.com", @@ -179,6 +205,14 @@ func TestToEnv(t *testing.T) { "image": "another.auth", 
"another-image": "yet.another.auth", }, + RegistryUsernameByImage: map[string]string{ + "image": "another.username", + "another-image": "yet.another.username", + }, + RegistryPasswordByImage: map[string]string{ + "image": "another.password", + "another-image": "yet.another.password", + }, DefaultPackagesInstallOverride: map[string]bool{ "package": true, "another-package": false, @@ -200,11 +234,17 @@ func TestToEnv(t *testing.T) { "DD_REMOTE_POLICIES=true", "DD_INSTALLER_REGISTRY_URL=registry.example.com", "DD_INSTALLER_REGISTRY_AUTH=auth", + "DD_INSTALLER_REGISTRY_USERNAME=username", + "DD_INSTALLER_REGISTRY_PASSWORD=password", "DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:latest,java,ruby:1.2", "DD_INSTALLER_REGISTRY_URL_IMAGE=another.registry.example.com", "DD_INSTALLER_REGISTRY_URL_ANOTHER_IMAGE=yet.another.registry.example.com", "DD_INSTALLER_REGISTRY_AUTH_IMAGE=another.auth", "DD_INSTALLER_REGISTRY_AUTH_ANOTHER_IMAGE=yet.another.auth", + "DD_INSTALLER_REGISTRY_USERNAME_IMAGE=another.username", + "DD_INSTALLER_REGISTRY_USERNAME_ANOTHER_IMAGE=yet.another.username", + "DD_INSTALLER_REGISTRY_PASSWORD_IMAGE=another.password", + "DD_INSTALLER_REGISTRY_PASSWORD_ANOTHER_IMAGE=yet.another.password", "DD_INSTALLER_DEFAULT_PKG_INSTALL_PACKAGE=true", "DD_INSTALLER_DEFAULT_PKG_INSTALL_ANOTHER_PACKAGE=false", "DD_INSTALLER_DEFAULT_PKG_VERSION_PACKAGE=1.2.3", diff --git a/pkg/fleet/installer/default_packages.go b/pkg/fleet/installer/default_packages.go index dad23e5ce3241..27e767ad6758d 100644 --- a/pkg/fleet/installer/default_packages.go +++ b/pkg/fleet/installer/default_packages.go @@ -40,7 +40,7 @@ var apmPackageDefaultVersions = map[string]string{ "datadog-apm-library-java": "1", "datadog-apm-library-ruby": "2", "datadog-apm-library-js": "5", - "datadog-apm-library-dotnet": "2", + "datadog-apm-library-dotnet": "3", "datadog-apm-library-python": "2", "datadog-apm-library-php": "1", } diff --git a/pkg/fleet/installer/default_packages_test.go b/pkg/fleet/installer/default_packages_test.go index 1707fff0a5609..c1d9734a68e4b 100644 --- a/pkg/fleet/installer/default_packages_test.go +++ b/pkg/fleet/installer/default_packages_test.go @@ -34,7 +34,7 @@ func TestDefaultPackagesAPMInjectEnabled(t *testing.T) { "oci://gcr.io/datadoghq/apm-library-java-package:1", "oci://gcr.io/datadoghq/apm-library-ruby-package:2", "oci://gcr.io/datadoghq/apm-library-js-package:5", - "oci://gcr.io/datadoghq/apm-library-dotnet-package:2", + "oci://gcr.io/datadoghq/apm-library-dotnet-package:3", "oci://gcr.io/datadoghq/apm-library-python-package:2", }, packages) } diff --git a/pkg/fleet/installer/installer.go b/pkg/fleet/installer/installer.go index 6e910d161f71f..55238d2c8c384 100644 --- a/pkg/fleet/installer/installer.go +++ b/pkg/fleet/installer/installer.go @@ -60,10 +60,16 @@ type Installer interface { RemoveExperiment(ctx context.Context, pkg string) error PromoteExperiment(ctx context.Context, pkg string) error + InstallConfigExperiment(ctx context.Context, pkg string, version string) error + RemoveConfigExperiment(ctx context.Context, pkg string) error + PromoteConfigExperiment(ctx context.Context, pkg string) error + GarbageCollect(ctx context.Context) error InstrumentAPMInjector(ctx context.Context, method string) error UninstrumentAPMInjector(ctx context.Context, method string) error + + Close() error } // installerImpl is the implementation of the package manager. 
@@ -71,7 +77,7 @@ type installerImpl struct { m sync.Mutex env *env.Env - cdn *cdn.CDN + cdn cdn.CDN db *db.PackagesDB downloader *oci.Downloader packages *repository.Repositories @@ -82,7 +88,7 @@ type installerImpl struct { } // NewInstaller returns a new Package Manager. -func NewInstaller(env *env.Env) (Installer, error) { +func NewInstaller(env *env.Env, configDBPath string) (Installer, error) { err := ensureRepositoriesExist() if err != nil { return nil, fmt.Errorf("could not ensure packages and config directory exists: %w", err) @@ -91,9 +97,13 @@ func NewInstaller(env *env.Env) (Installer, error) { if err != nil { return nil, fmt.Errorf("could not create packages db: %w", err) } + cdn, err := cdn.New(env, configDBPath) + if err != nil { + return nil, fmt.Errorf("could not create CDN client: %w", err) + } return &installerImpl{ env: env, - cdn: cdn.New(env), + cdn: cdn, db: db, downloader: oci.NewDownloader(env, http.DefaultClient), packages: repository.NewRepositories(paths.PackagesPath, paths.LocksPath), @@ -238,6 +248,7 @@ func (i *installerImpl) InstallExperiment(ctx context.Context, url string) error if err != nil { return fmt.Errorf("could not set experiment: %w", err) } + return i.startExperiment(ctx, pkg.Name) } @@ -284,6 +295,76 @@ func (i *installerImpl) PromoteExperiment(ctx context.Context, pkg string) error return i.promoteExperiment(ctx, pkg) } +// InstallConfigExperiment installs an experiment on top of an existing package. +func (i *installerImpl) InstallConfigExperiment(ctx context.Context, pkg string, version string) error { + i.m.Lock() + defer i.m.Unlock() + + config, err := i.cdn.Get(ctx) + if err != nil { + return fmt.Errorf("could not get cdn config: %w", err) + } + if config.Version != version { + return fmt.Errorf("version mismatch: expected %s, got %s", config.Version, version) + } + + tmpDir, err := i.packages.MkdirTemp() + if err != nil { + return fmt.Errorf("could not create temporary directory: %w", err) + } + defer os.RemoveAll(tmpDir) + + // Note: this is definitely not package agnostic, because the CDN isn't. + // This will be addressed in a follow-up PR + err = service.WriteAgentConfig(config, tmpDir) + if err != nil { + return fmt.Errorf("could not write agent config: %w", err) + } + + configRepo := i.configs.Get(pkg) + err = configRepo.SetExperiment(version, tmpDir) + if err != nil { + return fmt.Errorf("could not set experiment: %w", err) + } + + switch runtime.GOOS { + case "windows": + return nil // TODO: start config experiment for Windows + default: + return i.startExperiment(ctx, pkg) + } +} + +// RemoveConfigExperiment removes an experiment. +func (i *installerImpl) RemoveConfigExperiment(ctx context.Context, pkg string) error { + i.m.Lock() + defer i.m.Unlock() + + err := i.stopExperiment(ctx, pkg) + if err != nil { + return fmt.Errorf("could not stop experiment: %w", err) + } + repository := i.configs.Get(pkg) + err = repository.DeleteExperiment() + if err != nil { + return fmt.Errorf("could not delete experiment: %w", err) + } + return nil +} + +// PromoteConfigExperiment promotes an experiment to stable. +func (i *installerImpl) PromoteConfigExperiment(ctx context.Context, pkg string) error { + i.m.Lock() + defer i.m.Unlock() + + repository := i.configs.Get(pkg) + err := repository.PromoteExperiment() + if err != nil { + return fmt.Errorf("could not promote experiment: %w", err) + } + return i.promoteExperiment(ctx, pkg) +} + // Purge removes all packages. 
func (i *installerImpl) Purge(ctx context.Context) { i.m.Lock() @@ -396,6 +477,11 @@ func (i *installerImpl) UninstrumentAPMInjector(ctx context.Context, method stri return nil } +// Close cleans up the Installer's dependencies +func (i *installerImpl) Close() error { + return i.cdn.Close() +} + func (i *installerImpl) startExperiment(ctx context.Context, pkg string) error { switch pkg { case packageDatadogAgent: diff --git a/pkg/fleet/installer/service/commands.go b/pkg/fleet/installer/service/commands.go deleted file mode 100644 index dc9472d61a368..0000000000000 --- a/pkg/fleet/installer/service/commands.go +++ /dev/null @@ -1,45 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build !windows - -// Package service provides a way to interact with os services -package service - -import ( - "bytes" - "context" - "fmt" - "os/exec" - "strings" -) - -type commandRunner interface { - runWithError() error -} - -type realCmd struct { - *exec.Cmd -} - -func (r *realCmd) runWithError() error { - var errBuf bytes.Buffer - r.Stderr = &errBuf - err := r.Cmd.Run() - if err == nil { - return nil - } - - if len(errBuf.Bytes()) == 0 { - return fmt.Errorf("command failed: %s", err.Error()) - } - - return fmt.Errorf("command failed: %s \n%s", strings.TrimSpace(errBuf.String()), err.Error()) -} - -func newCommandRunner(ctx context.Context, name string, args ...string) commandRunner { - cmd := exec.CommandContext(ctx, name, args...) - return &realCmd{Cmd: cmd} -} diff --git a/pkg/fleet/installer/service/datadog_agent.go b/pkg/fleet/installer/service/datadog_agent.go index 30a69271d59d9..2ebee39b191e3 100644 --- a/pkg/fleet/installer/service/datadog_agent.go +++ b/pkg/fleet/installer/service/datadog_agent.go @@ -75,7 +75,7 @@ func SetupAgent(ctx context.Context, _ []string) (err error) { span, ctx := tracer.StartSpanFromContext(ctx, "setup_agent") defer func() { if err != nil { - log.Errorf("Failed to setup agent: %s, reverting", err) + log.Errorf("Failed to setup agent, reverting: %s", err) err = errors.Join(err, RemoveAgent(ctx)) } span.Finish(tracer.WithError(err)) @@ -195,11 +195,6 @@ func stopOldAgentUnits(ctx context.Context) error { defer span.Finish() for _, unit := range stableUnits { if err := stopUnit(ctx, unit); err != nil { - exitError, ok := err.(*exec.ExitError) - if ok && exitError.ExitCode() == 5 { - // exit code 5 means the unit is not loaded, we can continue - continue - } return fmt.Errorf("failed to stop %s: %v", unit, err) } if err := disableUnit(ctx, unit); err != nil { @@ -250,7 +245,7 @@ func PromoteAgentExperiment(ctx context.Context) error { } // ConfigureAgent configures the stable agent -func ConfigureAgent(ctx context.Context, cdn *cdn.CDN, configs *repository.Repositories) error { +func ConfigureAgent(ctx context.Context, cdn cdn.CDN, configs *repository.Repositories) error { config, err := cdn.Get(ctx) if err != nil { return fmt.Errorf("could not get cdn config: %w", err) @@ -260,37 +255,47 @@ func ConfigureAgent(ctx context.Context, cdn *cdn.CDN, configs *repository.Repos return fmt.Errorf("could not create temporary directory: %w", err) } defer os.RemoveAll(tmpDir) + + err = WriteAgentConfig(config, tmpDir) + if err != nil { + return fmt.Errorf("could not write agent config: %w", err) + } + + err = configs.Create(agentPackage, config.Version, tmpDir) + if err != 
nil { + return fmt.Errorf("could not create repository: %w", err) + } + return nil +} + +// WriteAgentConfig writes the agent configuration to the given directory +func WriteAgentConfig(config *cdn.Config, dir string) error { ddAgentUID, ddAgentGID, err := getAgentIDs() if err != nil { return fmt.Errorf("error getting dd-agent user and group IDs: %w", err) } if config.Datadog != nil { - err = os.WriteFile(filepath.Join(tmpDir, configDatadogYAML), []byte(config.Datadog), 0640) + err = os.WriteFile(filepath.Join(dir, configDatadogYAML), []byte(config.Datadog), 0640) if err != nil { return fmt.Errorf("could not write datadog.yaml: %w", err) } - err = os.Chown(filepath.Join(tmpDir, configDatadogYAML), ddAgentUID, ddAgentGID) + err = os.Chown(filepath.Join(dir, configDatadogYAML), ddAgentUID, ddAgentGID) if err != nil { return fmt.Errorf("could not chown datadog.yaml: %w", err) } } if config.SecurityAgent != nil { - err = os.WriteFile(filepath.Join(tmpDir, configSecurityAgentYAML), []byte(config.SecurityAgent), 0600) + err = os.WriteFile(filepath.Join(dir, configSecurityAgentYAML), []byte(config.SecurityAgent), 0600) if err != nil { return fmt.Errorf("could not write datadog.yaml: %w", err) } } if config.SystemProbe != nil { - err = os.WriteFile(filepath.Join(tmpDir, configSystemProbeYAML), []byte(config.SystemProbe), 0600) + err = os.WriteFile(filepath.Join(dir, configSystemProbeYAML), []byte(config.SystemProbe), 0600) if err != nil { return fmt.Errorf("could not write datadog.yaml: %w", err) } } - - err = configs.Create(agentPackage, config.Version, tmpDir) - if err != nil { - return fmt.Errorf("could not create repository: %w", err) - } return nil } diff --git a/pkg/fleet/installer/service/datadog_agent_windows.go b/pkg/fleet/installer/service/datadog_agent_windows.go index 736b39b5eaef7..62484d42eebc5 100644 --- a/pkg/fleet/installer/service/datadog_agent_windows.go +++ b/pkg/fleet/installer/service/datadog_agent_windows.go @@ -10,9 +10,13 @@ package service import ( "context" + "fmt" + "github.com/DataDog/datadog-agent/pkg/fleet/installer/repository" "github.com/DataDog/datadog-agent/pkg/fleet/internal/cdn" + "github.com/DataDog/datadog-agent/pkg/fleet/internal/winregistry" "github.com/DataDog/datadog-agent/pkg/util/log" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" ) @@ -30,8 +34,8 @@ func SetupAgent(ctx context.Context, args []string) (err error) { span.Finish(tracer.WithError(err)) }() // Make sure there are no Agent already installed - _ = removeProduct("Datadog Agent") - err = msiexec("stable", datadogAgent, "/i", args) + _ = removeAgentIfInstalled(ctx) + err = installAgentPackage("stable", args) return err } @@ -44,8 +48,18 @@ func StartAgentExperiment(ctx context.Context) (err error) { } span.Finish(tracer.WithError(err)) }() - err = msiexec("experiment", datadogAgent, "/i", nil) - return err + + err = removeAgentIfInstalled(ctx) + if err != nil { + return err + } + + err = installAgentPackage("experiment", nil) + if err != nil { + // experiment failed, expect stop-experiment to restore the stable Agent + return err + } + return nil } // StopAgentExperiment stops the agent experiment, i.e. removes/uninstalls it. 
@@ -57,14 +71,19 @@ func StopAgentExperiment(ctx context.Context) (err error) { } span.Finish(tracer.WithError(err)) }() - err = msiexec("experiment", datadogAgent, "/x", nil) + + err = removeAgentIfInstalled(ctx) if err != nil { return err } - // TODO: Need args here to restore DDAGENTUSER - err = msiexec("stable", datadogAgent, "/i", nil) - return err + err = installAgentPackage("stable", nil) + if err != nil { + // if we cannot restore the stable Agent, the system is left without an Agent + return err + } + + return nil } // PromoteAgentExperiment promotes the agent experiment @@ -75,18 +94,56 @@ func PromoteAgentExperiment(_ context.Context) error { // RemoveAgent stops and removes the agent func RemoveAgent(ctx context.Context) (err error) { - span, _ := tracer.StartSpanFromContext(ctx, "remove_agent") - defer func() { - if err != nil { - log.Errorf("Failed to remove agent: %s", err) - } - span.Finish(tracer.WithError(err)) - }() - err = removeProduct("Datadog Agent") - return err + // Don't return an error if the Agent is already not installed. + // returning an error here will prevent the package from being removed + // from the local repository. + return removeAgentIfInstalled(ctx) } // ConfigureAgent noop -func ConfigureAgent(_ context.Context, _ *cdn.CDN, _ *repository.Repositories) error { +func ConfigureAgent(_ context.Context, _ cdn.CDN, _ *repository.Repositories) error { + return nil +} + +// WriteAgentConfig noop +func WriteAgentConfig(_ *cdn.Config, _ string) error { + return nil +} + +func installAgentPackage(target string, args []string) error { + // Lookup stored Agent user and pass it to the Agent MSI + // TODO: bootstrap doesn't have a command-line agent user parameter yet, + // might need to update this when it does. + agentUser, err := winregistry.GetAgentUserName() + if err != nil { + return fmt.Errorf("failed to get Agent user: %w", err) + } + args = append(args, fmt.Sprintf("DDAGENTUSER_NAME=%s", agentUser)) + + err = msiexec(target, datadogAgent, "/i", args) + if err != nil { + return fmt.Errorf("failed to install Agent %s: %w", target, err) + } + return nil +} + +func removeAgentIfInstalled(ctx context.Context) (err error) { + if isProductInstalled("Datadog Agent") { + span, _ := tracer.StartSpanFromContext(ctx, "remove_agent") + defer func() { + if err != nil { + // removal failed, this should rarely happen. + // Rollback might have restored the Agent, but we can't be sure. 
+ log.Errorf("Failed to remove agent: %s", err) + } + span.Finish(tracer.WithError(err)) + }() + err := removeProduct("Datadog Agent") + if err != nil { + return err + } + } else { + log.Debugf("Agent not installed") + } return nil } diff --git a/pkg/fleet/installer/service/datadog_installer.go b/pkg/fleet/installer/service/datadog_installer.go index c410a2b2fd32e..17eff00d3c3f1 100644 --- a/pkg/fleet/installer/service/datadog_installer.go +++ b/pkg/fleet/installer/service/datadog_installer.go @@ -44,7 +44,7 @@ func addDDAgentGroup(ctx context.Context) error { func SetupInstaller(ctx context.Context) (err error) { defer func() { if err != nil { - log.Errorf("Failed to setup installer: %s, reverting", err) + log.Errorf("Failed to setup installer, reverting: %s", err) err = RemoveInstaller(ctx) } }() diff --git a/pkg/fleet/installer/service/embedded/datadog-agent-security-exp.service b/pkg/fleet/installer/service/embedded/datadog-agent-security-exp.service index c86acfd7ee24b..4dd5ae531f298 100644 --- a/pkg/fleet/installer/service/embedded/datadog-agent-security-exp.service +++ b/pkg/fleet/installer/service/embedded/datadog-agent-security-exp.service @@ -10,7 +10,7 @@ PIDFile=/opt/datadog-packages/datadog-agent/experiment/run/security-agent.pid Restart=on-failure EnvironmentFile=-/etc/datadog-agent/environment Environment="DD_FLEET_POLICIES_DIR=/etc/datadog-packages/datadog-agent/experiment" -ExecStart=/opt/datadog-packages/datadog-agent/experiment/embedded/bin/security-agent -c /etc/datadog-agent/datadog.yaml --pidfile /opt/datadog-packages/datadog-agent/experiment/run/security-agent.pid +ExecStart=/opt/datadog-packages/datadog-agent/experiment/embedded/bin/security-agent start -c /etc/datadog-agent/datadog.yaml --pidfile /opt/datadog-packages/datadog-agent/experiment/run/security-agent.pid # Since systemd 229, should be in [Unit] but in order to support systemd <229, # it is also supported to have it here. StartLimitInterval=10 diff --git a/pkg/fleet/installer/service/embedded/datadog-agent-security.service b/pkg/fleet/installer/service/embedded/datadog-agent-security.service index a71b46142be9b..e5d9602b4c951 100644 --- a/pkg/fleet/installer/service/embedded/datadog-agent-security.service +++ b/pkg/fleet/installer/service/embedded/datadog-agent-security.service @@ -10,7 +10,7 @@ PIDFile=/opt/datadog-packages/datadog-agent/stable/run/security-agent.pid Restart=on-failure EnvironmentFile=-/etc/datadog-agent/environment Environment="DD_FLEET_POLICIES_DIR=/etc/datadog-packages/datadog-agent/stable" -ExecStart=/opt/datadog-packages/datadog-agent/stable/embedded/bin/security-agent -c /etc/datadog-agent/datadog.yaml --pidfile /opt/datadog-packages/datadog-agent/stable/run/security-agent.pid +ExecStart=/opt/datadog-packages/datadog-agent/stable/embedded/bin/security-agent start -c /etc/datadog-agent/datadog.yaml --pidfile /opt/datadog-packages/datadog-agent/stable/run/security-agent.pid # Since systemd 229, should be in [Unit] but in order to support systemd <229, # it is also supported to have it here. 
StartLimitInterval=10 diff --git a/pkg/fleet/installer/service/msiexec.go b/pkg/fleet/installer/service/msiexec.go index 287ccdd6ace6c..4e5ed5897021c 100644 --- a/pkg/fleet/installer/service/msiexec.go +++ b/pkg/fleet/installer/service/msiexec.go @@ -10,6 +10,7 @@ package service import ( "fmt" "github.com/DataDog/datadog-agent/pkg/fleet/internal/paths" + "github.com/DataDog/datadog-agent/pkg/util/log" "golang.org/x/sys/windows/registry" "os/exec" "path/filepath" @@ -84,6 +85,7 @@ func processKey(rootPath, key, name string) (*Product, error) { // reflect the installed version, and using those installers can lead to undefined behavior (either failure to uninstall, // or weird bugs from uninstalling a product with an installer from a different version). func removeProduct(productName string) error { + log.Debugf("Removing product %s", productName) product, err := findProductCode(productName) if err != nil { return fmt.Errorf("error trying to find product %s: %w", productName, err) @@ -94,3 +96,11 @@ func removeProduct(productName string) error { } return fmt.Errorf("product %s not found", productName) } + +func isProductInstalled(productName string) bool { + product, err := findProductCode(productName) + if err != nil { + return false + } + return product != nil +} diff --git a/pkg/fleet/installer/service/systemd.go b/pkg/fleet/installer/service/systemd.go index 9242d465dd815..264394b6858f2 100644 --- a/pkg/fleet/installer/service/systemd.go +++ b/pkg/fleet/installer/service/systemd.go @@ -10,8 +10,10 @@ package service import ( "context" + "errors" "fmt" "os" + "os/exec" "path" "path/filepath" @@ -22,39 +24,78 @@ import ( const systemdPath = "/etc/systemd/system" -func stopUnit(ctx context.Context, unit string, args ...string) error { +func stopUnit(ctx context.Context, unit string, args ...string) (err error) { span, _ := tracer.StartSpanFromContext(ctx, "stop_unit") - defer span.Finish() + defer func() { span.Finish(tracer.WithError(err)) }() span.SetTag("unit", unit) args = append([]string{"stop", unit}, args...) - return newCommandRunner(ctx, "systemctl", args...).runWithError() + err = exec.CommandContext(ctx, "systemctl", args...).Run() + exitErr := &exec.ExitError{} + if !errors.As(err, &exitErr) { + return err + } + span.SetTag("exit_code", exitErr.ExitCode()) + if exitErr.ExitCode() == 5 { + // exit code 5 means the unit is not loaded, we can continue + return nil + } + return errors.New(string(exitErr.Stderr)) } -func startUnit(ctx context.Context, unit string, args ...string) error { +func startUnit(ctx context.Context, unit string, args ...string) (err error) { span, _ := tracer.StartSpanFromContext(ctx, "start_unit") - defer span.Finish() + defer func() { span.Finish(tracer.WithError(err)) }() span.SetTag("unit", unit) args = append([]string{"start", unit}, args...) 
- return newCommandRunner(ctx, "systemctl", args...).runWithError() + err = exec.CommandContext(ctx, "systemctl", args...).Run() + exitErr := &exec.ExitError{} + if !errors.As(err, &exitErr) { + return err + } + span.SetTag("exit_code", exitErr.ExitCode()) + return errors.New(string(exitErr.Stderr)) } -func enableUnit(ctx context.Context, unit string) error { +func enableUnit(ctx context.Context, unit string) (err error) { span, _ := tracer.StartSpanFromContext(ctx, "enable_unit") - defer span.Finish() + defer func() { span.Finish(tracer.WithError(err)) }() span.SetTag("unit", unit) - return newCommandRunner(ctx, "systemctl", "enable", unit).runWithError() + err = exec.CommandContext(ctx, "systemctl", "enable", unit).Run() + exitErr := &exec.ExitError{} + if !errors.As(err, &exitErr) { + return err + } + span.SetTag("exit_code", exitErr.ExitCode()) + return errors.New(string(exitErr.Stderr)) } -func disableUnit(ctx context.Context, unit string) error { +func disableUnit(ctx context.Context, unit string) (err error) { span, _ := tracer.StartSpanFromContext(ctx, "disable_unit") - defer span.Finish() + defer func() { span.Finish(tracer.WithError(err)) }() span.SetTag("unit", unit) - return newCommandRunner(ctx, "systemctl", "disable", unit).runWithError() + + enabledErr := exec.CommandContext(ctx, "systemctl", "is-enabled", "--quiet", unit).Run() + if enabledErr != nil { + // unit is already disabled or doesn't exist, we can return fast + return nil + } + + err = exec.CommandContext(ctx, "systemctl", "disable", unit).Run() + exitErr := &exec.ExitError{} + if !errors.As(err, &exitErr) { + return err + } + span.SetTag("exit_code", exitErr.ExitCode()) + if exitErr.ExitCode() == 5 { + // exit code 5 means the unit is not loaded, we can continue + return nil + } + return errors.New(string(exitErr.Stderr)) } -func loadUnit(ctx context.Context, unit string) error { +func loadUnit(ctx context.Context, unit string) (err error) { span, _ := tracer.StartSpanFromContext(ctx, "load_unit") - defer span.Finish() + defer func() { span.Finish(tracer.WithError(err)) }() span.SetTag("unit", unit) content, err := embedded.FS.ReadFile(unit) if err != nil { @@ -64,17 +105,23 @@ func loadUnit(ctx context.Context, unit string) error { return os.WriteFile(unitPath, content, 0644) } -func removeUnit(ctx context.Context, unit string) error { +func removeUnit(ctx context.Context, unit string) (err error) { span, _ := tracer.StartSpanFromContext(ctx, "remove_unit") - defer span.Finish() + defer func() { span.Finish(tracer.WithError(err)) }() span.SetTag("unit", unit) return os.Remove(path.Join(systemdPath, unit)) } -func systemdReload(ctx context.Context) error { +func systemdReload(ctx context.Context) (err error) { span, _ := tracer.StartSpanFromContext(ctx, "systemd_reload") - defer span.Finish() - return newCommandRunner(ctx, "systemctl", "daemon-reload").runWithError() + defer func() { span.Finish(tracer.WithError(err)) }() + err = exec.CommandContext(ctx, "systemctl", "daemon-reload").Run() + exitErr := &exec.ExitError{} + if !errors.As(err, &exitErr) { + return err + } + span.SetTag("exit_code", exitErr.ExitCode()) + return errors.New(string(exitErr.Stderr)) } // isSystemdRunning checks if systemd is running using the documented way diff --git a/pkg/fleet/installer/setup.go b/pkg/fleet/installer/setup.go new file mode 100644 index 0000000000000..1def62be3c752 --- /dev/null +++ b/pkg/fleet/installer/setup.go @@ -0,0 +1,33 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under 
the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package installer contains the installer subcommands +package installer + +import ( + "context" + "fmt" + "os" + + "github.com/DataDog/datadog-agent/pkg/fleet/env" + "github.com/DataDog/datadog-agent/pkg/fleet/internal/exec" + "github.com/DataDog/datadog-agent/pkg/fleet/internal/paths" +) + +// Setup is the main function to resolve packages to install and install them +func Setup(ctx context.Context, env *env.Env) error { + cmd := exec.NewInstallerExec(env, paths.StableInstallerPath) + defaultPackages, err := cmd.DefaultPackages(ctx) + if err != nil { + return fmt.Errorf("failed to get default packages: %w", err) + } + for _, url := range defaultPackages { + err = cmd.Install(ctx, url, nil) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to install package %s: %v\n", url, err) + } + } + return nil +} diff --git a/pkg/fleet/internal/bootstrap/bootstrap.go b/pkg/fleet/internal/bootstrap/bootstrap.go index f34d43764d61a..e16f0cd9d603e 100644 --- a/pkg/fleet/internal/bootstrap/bootstrap.go +++ b/pkg/fleet/internal/bootstrap/bootstrap.go @@ -8,21 +8,13 @@ package bootstrap import ( "context" - "fmt" - "net/http" - "os" - "path/filepath" - "github.com/DataDog/datadog-agent/pkg/fleet/env" - "github.com/DataDog/datadog-agent/pkg/fleet/internal/exec" - "github.com/DataDog/datadog-agent/pkg/fleet/internal/oci" ) const ( - installerPackage = "datadog-installer" + // InstallerPackage is the name of the Datadog Installer OCI package + InstallerPackage = "datadog-installer" installerBinPath = "bin/installer/installer" - - rootTmpDir = "/opt/datadog-installer/tmp" ) // Install self-installs the installer package from the given URL. @@ -34,63 +26,3 @@ func Install(ctx context.Context, env *env.Env, url string) error { func InstallExperiment(ctx context.Context, env *env.Env, url string) error { return install(ctx, env, url, true) } - -func install(ctx context.Context, env *env.Env, url string, experiment bool) error { - err := os.MkdirAll(rootTmpDir, 0755) - if err != nil { - return fmt.Errorf("failed to create temporary directory: %w", err) - } - tmpDir, err := os.MkdirTemp(rootTmpDir, "") - if err != nil { - return fmt.Errorf("failed to create temporary directory: %w", err) - } - defer os.RemoveAll(tmpDir) - cmd, err := downloadInstaller(ctx, env, url, tmpDir) - if err != nil { - return fmt.Errorf("failed to download installer: %w", err) - } - if experiment { - return cmd.InstallExperiment(ctx, url) - } - return cmd.Install(ctx, url, nil) -} - -// downloadInstaller downloads the installer package from the registry and returns an installer executor. -// -// This process is made to have the least assumption possible as it is long lived and should always work in the future. -// 1. Download the installer package from the registry. -// 2. Export the installer image as an OCI layout on the disk. -// 3. Extract the installer image layers on the disk. -// 4. Create an installer executor from the extract layer. -func downloadInstaller(ctx context.Context, env *env.Env, url string, tmpDir string) (*exec.InstallerExec, error) { - // 1. Download the installer package from the registry. 
- downloader := oci.NewDownloader(env, http.DefaultClient) - downloadedPackage, err := downloader.Download(ctx, url) - if err != nil { - return nil, fmt.Errorf("failed to download installer package: %w", err) - } - if downloadedPackage.Name != installerPackage { - return nil, fmt.Errorf("unexpected package name: %s, expected %s", downloadedPackage.Name, installerPackage) - } - - // 2. Export the installer image as an OCI layout on the disk. - layoutTmpDir, err := os.MkdirTemp(rootTmpDir, "") - if err != nil { - return nil, fmt.Errorf("failed to create temporary directory: %w", err) - } - defer os.RemoveAll(layoutTmpDir) - err = downloadedPackage.WriteOCILayout(layoutTmpDir) - if err != nil { - return nil, fmt.Errorf("failed to write OCI layout: %w", err) - } - - // 3. Extract the installer image layers on the disk. - err = downloadedPackage.ExtractLayers(oci.DatadogPackageLayerMediaType, tmpDir) - if err != nil { - return nil, fmt.Errorf("failed to extract layers: %w", err) - } - - // 4. Create an installer executor from the extract layer. - installerBinPath := filepath.Join(tmpDir, installerBinPath) - return exec.NewInstallerExec(env, installerBinPath), nil -} diff --git a/pkg/fleet/internal/bootstrap/bootstrap_nix.go b/pkg/fleet/internal/bootstrap/bootstrap_nix.go new file mode 100644 index 0000000000000..83b0276814933 --- /dev/null +++ b/pkg/fleet/internal/bootstrap/bootstrap_nix.go @@ -0,0 +1,82 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !windows + +// Package bootstrap provides logic to self-bootstrap the installer. +package bootstrap + +import ( + "context" + "fmt" + "github.com/DataDog/datadog-agent/pkg/fleet/internal/paths" + "net/http" + "os" + "path/filepath" + + "github.com/DataDog/datadog-agent/pkg/fleet/env" + "github.com/DataDog/datadog-agent/pkg/fleet/internal/exec" + "github.com/DataDog/datadog-agent/pkg/fleet/internal/oci" +) + +func install(ctx context.Context, env *env.Env, url string, experiment bool) error { + err := os.MkdirAll(paths.RootTmpDir, 0755) + if err != nil { + return fmt.Errorf("failed to create temporary directory: %w", err) + } + tmpDir, err := os.MkdirTemp(paths.RootTmpDir, "") + if err != nil { + return fmt.Errorf("failed to create temporary directory: %w", err) + } + defer os.RemoveAll(tmpDir) + cmd, err := downloadInstaller(ctx, env, url, tmpDir) + if err != nil { + return fmt.Errorf("failed to download installer: %w", err) + } + if experiment { + return cmd.InstallExperiment(ctx, url) + } + return cmd.Install(ctx, url, nil) +} + +// downloadInstaller downloads the installer package from the registry and returns an installer executor. +// +// This process is made to have the least assumption possible as it is long lived and should always work in the future. +// 1. Download the installer package from the registry. +// 2. Export the installer image as an OCI layout on the disk. +// 3. Extract the installer image layers on the disk. +// 4. Create an installer executor from the extract layer. +func downloadInstaller(ctx context.Context, env *env.Env, url string, tmpDir string) (*exec.InstallerExec, error) { + // 1. Download the installer package from the registry. 
+ downloader := oci.NewDownloader(env, http.DefaultClient) + downloadedPackage, err := downloader.Download(ctx, url) + if err != nil { + return nil, fmt.Errorf("failed to download installer package: %w", err) + } + if downloadedPackage.Name != InstallerPackage { + return nil, fmt.Errorf("unexpected package name: %s, expected %s", downloadedPackage.Name, InstallerPackage) + } + + // 2. Export the installer image as an OCI layout on the disk. + layoutTmpDir, err := os.MkdirTemp(paths.RootTmpDir, "") + if err != nil { + return nil, fmt.Errorf("failed to create temporary directory: %w", err) + } + defer os.RemoveAll(layoutTmpDir) + err = downloadedPackage.WriteOCILayout(layoutTmpDir) + if err != nil { + return nil, fmt.Errorf("failed to write OCI layout: %w", err) + } + + // 3. Extract the installer image layers on the disk. + err = downloadedPackage.ExtractLayers(oci.DatadogPackageLayerMediaType, tmpDir) + if err != nil { + return nil, fmt.Errorf("failed to extract layers: %w", err) + } + + // 4. Create an installer executor from the extract layer. + installerBinPath := filepath.Join(tmpDir, installerBinPath) + return exec.NewInstallerExec(env, installerBinPath), nil +} diff --git a/pkg/fleet/internal/bootstrap/bootstrap_windows.go b/pkg/fleet/internal/bootstrap/bootstrap_windows.go new file mode 100644 index 0000000000000..43e1f64b430ac --- /dev/null +++ b/pkg/fleet/internal/bootstrap/bootstrap_windows.go @@ -0,0 +1,85 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build windows + +// Package bootstrap provides logic to self-bootstrap the installer. +package bootstrap + +import ( + "context" + "fmt" + "github.com/DataDog/datadog-agent/pkg/fleet/internal/paths" + "net/http" + "os" + "os/exec" + "path/filepath" + + "github.com/DataDog/datadog-agent/pkg/fleet/env" + iexec "github.com/DataDog/datadog-agent/pkg/fleet/internal/exec" + "github.com/DataDog/datadog-agent/pkg/fleet/internal/oci" +) + +func install(ctx context.Context, env *env.Env, url string, experiment bool) error { + err := os.MkdirAll(paths.RootTmpDir, 0755) + if err != nil { + return fmt.Errorf("failed to create temporary directory: %w", err) + } + tmpDir, err := os.MkdirTemp(paths.RootTmpDir, "") + if err != nil { + return fmt.Errorf("failed to create temporary directory: %w", err) + } + defer os.RemoveAll(tmpDir) + cmd, err := downloadInstaller(ctx, env, url, tmpDir) + if err != nil { + return fmt.Errorf("failed to download installer: %w", err) + } + if experiment { + return cmd.InstallExperiment(ctx, url) + } + return cmd.Install(ctx, url, nil) +} + +// downloadInstaller downloads the installer package from the registry and returns the path to the executable. 
+func downloadInstaller(ctx context.Context, env *env.Env, url string, tmpDir string) (*iexec.InstallerExec, error) { + downloader := oci.NewDownloader(env, http.DefaultClient) + downloadedPackage, err := downloader.Download(ctx, url) + if err != nil { + return nil, fmt.Errorf("failed to download installer package: %w", err) + } + if downloadedPackage.Name != InstallerPackage { + return nil, fmt.Errorf("unexpected package name: %s, expected %s", downloadedPackage.Name, InstallerPackage) + } + + layoutTmpDir, err := os.MkdirTemp(paths.RootTmpDir, "") + if err != nil { + return nil, fmt.Errorf("failed to create temporary directory: %w", err) + } + defer os.RemoveAll(layoutTmpDir) + err = downloadedPackage.WriteOCILayout(layoutTmpDir) + if err != nil { + return nil, fmt.Errorf("failed to write OCI layout: %w", err) + } + + err = downloadedPackage.ExtractLayers(oci.DatadogPackageLayerMediaType, tmpDir) + if err != nil { + return nil, fmt.Errorf("failed to extract layers: %w", err) + } + + msis, err := filepath.Glob(filepath.Join(tmpDir, "datadog-installer-*-1-x86_64.msi")) + if err != nil { + return nil, err + } + if len(msis) > 1 { + return nil, fmt.Errorf("too many MSIs in package") + } else if len(msis) == 0 { + return nil, fmt.Errorf("no MSIs in package") + } + err = exec.Command("msiexec", "/i", msis[0], "/qn", "MSIFASTINSTALL=7").Run() + if err != nil { + return nil, fmt.Errorf("failed to install the Datadog Installer") + } + return iexec.NewInstallerExec(env, paths.StableInstallerPath), nil +} diff --git a/pkg/fleet/internal/cdn/cdn.go b/pkg/fleet/internal/cdn/cdn.go index 53d93568777b4..4b975e30fea6a 100644 --- a/pkg/fleet/internal/cdn/cdn.go +++ b/pkg/fleet/internal/cdn/cdn.go @@ -11,19 +11,12 @@ import ( "encoding/json" "fmt" "regexp" - "time" - "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/hosttags" - "github.com/DataDog/datadog-agent/comp/remote-config/rctelemetryreporter/rctelemetryreporterimpl" - detectenv "github.com/DataDog/datadog-agent/pkg/config/env" - "github.com/DataDog/datadog-agent/pkg/config/model" remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/fleet/env" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" - pkghostname "github.com/DataDog/datadog-agent/pkg/util/hostname" "github.com/DataDog/datadog-agent/pkg/version" - "github.com/google/uuid" + "github.com/DataDog/go-tuf/data" "go.uber.org/multierr" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" ) @@ -33,8 +26,14 @@ var datadogConfigIDRegexp = regexp.MustCompile(`^datadog/\d+/AGENT_CONFIG/([^/]+ const configOrderID = "configuration_order" // CDN provides access to the Remote Config CDN. -type CDN struct { - env *env.Env +type CDN interface { + Get(ctx context.Context) (*Config, error) + Close() error +} + +type cdnRemote struct { + client *remoteconfig.HTTPClient + currentRootsVersion uint64 } // Config represents the configuration from the CDN. @@ -50,15 +49,32 @@ type orderConfig struct { } // New creates a new CDN. 
-func New(env *env.Env) *CDN { - return &CDN{ - env: env, +func New(env *env.Env, configDBPath string) (CDN, error) { + if env.CDNLocalDirPath != "" { + return newLocal(env) + } + return newRemote(env, configDBPath) +} + +func newRemote(env *env.Env, configDBPath string) (CDN, error) { + client, err := remoteconfig.NewHTTPClient( + configDBPath, + env.Site, + env.APIKey, + version.AgentVersion, + ) + if err != nil { + return nil, err } + return &cdnRemote{ + client: client, + currentRootsVersion: 1, + }, nil } // Get gets the configuration from the CDN. -func (c *CDN) Get(ctx context.Context) (_ *Config, err error) { - span, ctx := tracer.StartSpanFromContext(ctx, "cdn.Get") +func (c *cdnRemote) Get(ctx context.Context) (_ *Config, err error) { + span, _ := tracer.StartSpanFromContext(ctx, "cdn.Get") defer func() { span.Finish(tracer.WithError(err)) }() configLayers, err := c.getOrderedLayers(ctx) if err != nil { @@ -67,71 +83,71 @@ func (c *CDN) Get(ctx context.Context) (_ *Config, err error) { return newConfig(configLayers...) } +// Close cleans up the CDN's resources +func (c *cdnRemote) Close() error { + return c.client.Close() +} + // getOrderedLayers calls the Remote Config service to get the ordered layers. -// Today it doesn't use the CDN, but it should in the future -func (c *CDN) getOrderedLayers(ctx context.Context) ([]*layer, error) { - // HACK(baptiste): Create a dedicated one-shot RC service just for the configuration - // We should use the CDN instead - config := pkgconfigsetup.Datadog() - config.Set("run_path", "/opt/datadog-packages/datadog-installer/stable/run", model.SourceAgentRuntime) - - detectenv.DetectFeatures(config) - hostname, err := pkghostname.Get(ctx) - if err != nil { - return nil, err - } - options := []remoteconfig.Option{ - remoteconfig.WithAPIKey(c.env.APIKey), - remoteconfig.WithConfigRootOverride(c.env.Site, ""), - remoteconfig.WithDirectorRootOverride(c.env.Site, ""), - } - service, err := remoteconfig.NewService( - config, - "Datadog Installer", - fmt.Sprintf("https://config.%s", c.env.Site), - hostname, - getHostTags(ctx, config), - &rctelemetryreporterimpl.DdRcTelemetryReporter{}, // No telemetry for this client - version.AgentVersion, - options..., +func (c *cdnRemote) getOrderedLayers(ctx context.Context) ([]*layer, error) { + agentConfigUpdate, err := c.client.GetCDNConfigUpdate( + ctx, + []string{"AGENT_CONFIG"}, + // Always send 0 since we are relying on the CDN cache state instead of our own tracer cache. This will fetch the latest configs from the cache/CDN every time. + 0, + // Not using the roots; send the highest seen version of roots so we don't receive them all on every request + c.currentRootsVersion, + // Not using a client cache; fetch all the applicable target files every time.
+ []*pbgo.TargetFileMeta{}, ) if err != nil { return nil, err } - service.Start() - defer func() { _ = service.Stop() }() - // Force a cache bypass - cfgs, err := service.ClientGetConfigs(ctx, &pbgo.ClientGetConfigsRequest{ - Client: &pbgo.Client{ - Id: uuid.New().String(), - Products: []string{"AGENT_CONFIG"}, - IsUpdater: true, - ClientUpdater: &pbgo.ClientUpdater{}, - State: &pbgo.ClientState{ - RootVersion: 1, - TargetsVersion: 1, - }, - }, - }) - if err != nil { - return nil, err + + orderedLayers := []*layer{} + if agentConfigUpdate == nil { + return orderedLayers, nil + } + + // Update CDN root versions + for _, root := range agentConfigUpdate.TUFRoots { + var signedRoot data.Signed + err = json.Unmarshal(root, &signedRoot) + if err != nil { + continue + } + var r data.Root + err = json.Unmarshal(signedRoot.Signed, &r) + if err != nil { + continue + } + if uint64(r.Version) > c.currentRootsVersion { + c.currentRootsVersion = uint64(r.Version) + } } // Unmarshal RC results configLayers := map[string]*layer{} var configOrder *orderConfig var layersErr error - for _, file := range cfgs.TargetFiles { - matched := datadogConfigIDRegexp.FindStringSubmatch(file.GetPath()) + paths := agentConfigUpdate.ClientConfigs + targetFiles := agentConfigUpdate.TargetFiles + for _, path := range paths { + matched := datadogConfigIDRegexp.FindStringSubmatch(path) if len(matched) != 2 { - layersErr = multierr.Append(layersErr, fmt.Errorf("invalid config path: %s", file.GetPath())) + layersErr = multierr.Append(layersErr, fmt.Errorf("invalid config path: %s", path)) continue } configName := matched[1] + file, ok := targetFiles[path] + if !ok { + layersErr = multierr.Append(layersErr, fmt.Errorf("missing expected target file in update response: %s", path)) + continue + } if configName != configOrderID { configLayer := &layer{} - err = json.Unmarshal(file.GetRaw(), configLayer) + err = json.Unmarshal(file, configLayer) if err != nil { // If a layer is wrong, fail later to parse the rest and check them all layersErr = multierr.Append(layersErr, err) @@ -140,7 +156,7 @@ func (c *CDN) getOrderedLayers(ctx context.Context) ([]*layer, error) { configLayers[configName] = configLayer } else { configOrder = &orderConfig{} - err = json.Unmarshal(file.GetRaw(), configOrder) + err = json.Unmarshal(file, configOrder) if err != nil { // Return first - we can't continue without the order return nil, err @@ -151,30 +167,19 @@ func (c *CDN) getOrderedLayers(ctx context.Context) ([]*layer, error) { return nil, layersErr } - // Order configs if configOrder == nil { return nil, fmt.Errorf("no configuration_order found") } + + return orderLayers(*configOrder, configLayers), nil +} + +func orderLayers(configOrder orderConfig, configLayers map[string]*layer) []*layer { orderedLayers := []*layer{} for _, configName := range configOrder.Order { if configLayer, ok := configLayers[configName]; ok { orderedLayers = append(orderedLayers, configLayer) } } - - return orderedLayers, nil -} - -func getHostTags(ctx context.Context, config model.Config) func() []string { - return func() []string { - // Host tags are cached on host, but we add a timeout to avoid blocking the RC request - // if the host tags are not available yet and need to be fetched. They will be fetched - // by the first agent metadata V5 payload. - ctx, cc := context.WithTimeout(ctx, time.Second) - defer cc() - hostTags := hosttags.Get(ctx, true, config) - tags := append(hostTags.System, hostTags.GoogleCloudPlatform...) 
- tags = append(tags, "installer:true") - return tags - } + return orderedLayers } diff --git a/pkg/fleet/internal/cdn/cdn_local.go b/pkg/fleet/internal/cdn/cdn_local.go new file mode 100644 index 0000000000000..11872a4fefe56 --- /dev/null +++ b/pkg/fleet/internal/cdn/cdn_local.go @@ -0,0 +1,72 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package cdn + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/DataDog/datadog-agent/pkg/fleet/env" +) + +type cdnLocal struct { + dirPath string +} + +// newLocal creates a new local CDN. +func newLocal(env *env.Env) (CDN, error) { + return &cdnLocal{ + dirPath: env.CDNLocalDirPath, + }, nil +} + +// Get gets the configuration from the CDN. +func (c *cdnLocal) Get(_ context.Context) (_ *Config, err error) { + files, err := os.ReadDir(c.dirPath) + if err != nil { + return nil, fmt.Errorf("couldn't read directory %s: %w", c.dirPath, err) + } + + var configOrder *orderConfig + var configLayers = make(map[string]*layer) + for _, file := range files { + if file.IsDir() { + continue + } + + contents, err := os.ReadFile(filepath.Join(c.dirPath, file.Name())) + if err != nil { + return nil, fmt.Errorf("couldn't read file %s: %w", file.Name(), err) + } + + if file.Name() == configOrderID { + err = json.Unmarshal(contents, &configOrder) + if err != nil { + return nil, fmt.Errorf("couldn't unmarshal config order %s: %w", file.Name(), err) + } + } else { + configLayer := &layer{} + err = json.Unmarshal(contents, configLayer) + if err != nil { + return nil, fmt.Errorf("couldn't unmarshal file %s: %w", file.Name(), err) + } + configLayers[file.Name()] = configLayer + } + } + + if configOrder == nil { + return nil, fmt.Errorf("no configuration_order found") + } + + return newConfig(orderLayers(*configOrder, configLayers)...) 
+} + +func (c *cdnLocal) Close() error { + return nil +} diff --git a/pkg/fleet/internal/cdn/config.go b/pkg/fleet/internal/cdn/config.go index 979845c31aec8..fda375d007afa 100644 --- a/pkg/fleet/internal/cdn/config.go +++ b/pkg/fleet/internal/cdn/config.go @@ -36,8 +36,9 @@ func newConfig(layers ...*layer) (_ *Config, err error) { SystemProbeConfig: map[string]interface{}{}, } - // Merge all layers in order - for _, l := range layers { + // Merge all layers in reverse order (first layer has precedence) + for i := len(layers) - 1; i >= 0; i-- { + l := layers[i] layerIDs = append(layerIDs, l.ID) if l.AgentConfig != nil { agentConfig, err := merge(mergedLayer.AgentConfig, l.AgentConfig) diff --git a/pkg/fleet/internal/cdn/config_test.go b/pkg/fleet/internal/cdn/config_test.go index 78dd772a456b6..0f0f26bc1ac00 100644 --- a/pkg/fleet/internal/cdn/config_test.go +++ b/pkg/fleet/internal/cdn/config_test.go @@ -38,10 +38,10 @@ api_key: "1234" apm: enabled: true env: prod - sampling_rate: 0.7 + sampling_rate: 0.5 fleet_layers: -- base - override +- base ` assert.Equal(t, expectedConfig, string(config.Datadog)) } diff --git a/pkg/fleet/internal/exec/installer_exec.go b/pkg/fleet/internal/exec/installer_exec.go index 6ac122dfdfa1e..79ff8d37692e5 100644 --- a/pkg/fleet/internal/exec/installer_exec.go +++ b/pkg/fleet/internal/exec/installer_exec.go @@ -12,6 +12,7 @@ import ( "fmt" "os" "os/exec" + "runtime" "strings" "github.com/DataDog/datadog-agent/pkg/fleet/internal/paths" @@ -23,13 +24,6 @@ import ( "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" ) -const ( - // StableInstallerPath is the path to the stable installer binary. - StableInstallerPath = "/opt/datadog-packages/datadog-installer/stable/bin/installer/installer" - // ExperimentInstallerPath is the path to the experiment installer binary. - ExperimentInstallerPath = "/opt/datadog-packages/datadog-installer/experiment/bin/installer/installer" -) - // InstallerExec is an implementation of the Installer interface that uses the installer binary. type InstallerExec struct { env *env.Env @@ -56,8 +50,12 @@ func (i *InstallerExec) newInstallerCmd(ctx context.Context, command string, arg span.SetTag("args", args) cmd := exec.CommandContext(ctx, i.installerBinPath, append([]string{command}, args...)...) env = append(os.Environ(), env...) - cmd.Cancel = func() error { - return cmd.Process.Signal(os.Interrupt) + if runtime.GOOS != "windows" { + // os.Interrupt is not supported on Windows + // It gives " run failed: exec: canceling Cmd: not supported by windows" + cmd.Cancel = func() error { + return cmd.Process.Signal(os.Interrupt) + } } env = append(env, telemetry.EnvFromSpanContext(span.Context())...) cmd.Env = env @@ -110,6 +108,27 @@ func (i *InstallerExec) PromoteExperiment(ctx context.Context, pkg string) (err return cmd.Run() } +// InstallConfigExperiment installs a config experiment. +func (i *InstallerExec) InstallConfigExperiment(ctx context.Context, url string, version string) (err error) { + cmd := i.newInstallerCmd(ctx, "install-config-experiment", url, version) + defer func() { cmd.span.Finish(tracer.WithError(err)) }() + return cmd.Run() +} + +// RemoveConfigExperiment removes a config experiment. +func (i *InstallerExec) RemoveConfigExperiment(ctx context.Context, pkg string) (err error) { + cmd := i.newInstallerCmd(ctx, "remove-config-experiment", pkg) + defer func() { cmd.span.Finish(tracer.WithError(err)) }() + return cmd.Run() +} + +// PromoteConfigExperiment promotes a config experiment to stable.
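Aside on the cdn/config.go hunk above: the merge loop now walks layers back to front, so the first layer in the configured order has precedence, which is why the test now expects sampling_rate 0.5 (from the override layer listed first) rather than 0.7. A minimal sketch of that precedence rule, using plain maps instead of the package's layer type and merge helper:

package main

import "fmt"

// mergeLayers applies layers from last to first, so earlier layers overwrite
// later ones: the first layer in the order wins.
func mergeLayers(layers []map[string]interface{}) map[string]interface{} {
	merged := map[string]interface{}{}
	for i := len(layers) - 1; i >= 0; i-- {
		for k, v := range layers[i] {
			merged[k] = v
		}
	}
	return merged
}

func main() {
	override := map[string]interface{}{"sampling_rate": 0.5}
	base := map[string]interface{}{"sampling_rate": 0.7, "env": "prod"}
	// "override" is listed first, so its sampling_rate wins over "base".
	fmt.Println(mergeLayers([]map[string]interface{}{override, base}))
	// map[env:prod sampling_rate:0.5]
}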
+func (i *InstallerExec) PromoteConfigExperiment(ctx context.Context, pkg string) (err error) { + cmd := i.newInstallerCmd(ctx, "promote-config-experiment", pkg) + defer func() { cmd.span.Finish(tracer.WithError(err)) }() + return cmd.Run() +} + // GarbageCollect runs the garbage collector. func (i *InstallerExec) GarbageCollect(ctx context.Context) (err error) { cmd := i.newInstallerCmd(ctx, "garbage-collect") @@ -167,6 +186,19 @@ func (i *InstallerExec) DefaultPackages(ctx context.Context) (_ []string, err er return defaultPackages, nil } +// Setup runs the setup command. +func (i *InstallerExec) Setup(ctx context.Context) (err error) { + cmd := i.newInstallerCmd(ctx, "setup") + defer func() { cmd.span.Finish(tracer.WithError(err)) }() + var stderr bytes.Buffer + cmd.Stderr = &stderr + err = cmd.Run() + if err != nil { + return fmt.Errorf("error running setup: %w\n%s", err, stderr.String()) + } + return nil +} + // State returns the state of a package. func (i *InstallerExec) State(pkg string) (repository.State, error) { repositories := repository.NewRepositories(paths.PackagesPath, paths.LocksPath) @@ -195,6 +227,11 @@ func (i *InstallerExec) ConfigStates() (map[string]repository.State, error) { return states, err } +// Close cleans up any resources. +func (i *InstallerExec) Close() error { + return nil +} + func (iCmd *installerCmd) Run() error { var errBuf bytes.Buffer iCmd.Stderr = &errBuf diff --git a/pkg/fleet/internal/oci/download.go b/pkg/fleet/internal/oci/download.go index 95593831dc901..6900c771dc942 100644 --- a/pkg/fleet/internal/oci/download.go +++ b/pkg/fleet/internal/oci/download.go @@ -41,6 +41,8 @@ const ( RegistryAuthGCR string = "gcr" // RegistryAuthECR is the Amazon Elastic Container Registry authentication method. RegistryAuthECR string = "ecr" + // RegistryAuthPassword is the password registry authentication method. 
+ RegistryAuthPassword string = "password" ) const ( @@ -132,12 +134,17 @@ func (d *Downloader) Download(ctx context.Context, packageURL string) (*Download }, nil } -func getKeychain(auth string) authn.Keychain { +func getKeychain(auth string, username string, password string) authn.Keychain { switch auth { case RegistryAuthGCR: return google.Keychain case RegistryAuthECR: return authn.NewKeychainFromHelper(ecr.NewECRHelper()) + case RegistryAuthPassword: + return usernamePasswordKeychain{ + username: username, + password: password, + } case RegistryAuthDefault, "": return authn.DefaultKeychain default: @@ -169,10 +176,10 @@ func getRefAndKeychain(env *env.Env, url string) urlWithKeychain { } ref = registryOverride + imageWithIdentifier } - keychain := getKeychain(env.RegistryAuthOverride) + keychain := getKeychain(env.RegistryAuthOverride, env.RegistryUsername, env.RegistryPassword) for image, override := range env.RegistryAuthOverrideByImage { if strings.HasPrefix(imageWithIdentifier, image+":") || strings.HasPrefix(imageWithIdentifier, image+"@") { - keychain = getKeychain(override) + keychain = getKeychain(override, env.RegistryUsername, env.RegistryPassword) break } } @@ -313,3 +320,15 @@ func isStreamResetError(err error) bool { } return false } + +type usernamePasswordKeychain struct { + username string + password string +} + +func (k usernamePasswordKeychain) Resolve(_ authn.Resource) (authn.Authenticator, error) { + return authn.FromConfig(authn.AuthConfig{ + Username: k.username, + Password: k.password, + }), nil +} diff --git a/pkg/fleet/internal/paths/installer_paths.go b/pkg/fleet/internal/paths/installer_paths.go index 24e53dc13dceb..394f1af1a5093 100644 --- a/pkg/fleet/internal/paths/installer_paths.go +++ b/pkg/fleet/internal/paths/installer_paths.go @@ -15,7 +15,12 @@ const ( ConfigsPath = "/etc/datadog-packages" // LocksPath is the path to the packages locks directory. LocksPath = "/opt/datadog-packages/run/locks" - + // RootTmpDir is the temporary path where the bootstrapper will be extracted to. + RootTmpDir = "/opt/datadog-installer/tmp" // DefaultUserConfigsDir is the default Agent configuration directory. DefaultUserConfigsDir = "/etc" + // StableInstallerPath is the path to the stable installer binary. + StableInstallerPath = "/opt/datadog-packages/datadog-installer/stable/bin/installer/installer" + // ExperimentInstallerPath is the path to the experiment installer binary. + ExperimentInstallerPath = "/opt/datadog-packages/datadog-installer/experiment/bin/installer/installer" ) diff --git a/pkg/fleet/internal/paths/installer_paths_windows.go b/pkg/fleet/internal/paths/installer_paths_windows.go index 456cb7e6af7c2..07aa3216ce9da 100644 --- a/pkg/fleet/internal/paths/installer_paths_windows.go +++ b/pkg/fleet/internal/paths/installer_paths_windows.go @@ -22,9 +22,12 @@ var ( ConfigsPath string // LocksPath is the path to the locks directory. LocksPath string - + // RootTmpDir is the temporary path where the bootstrapper will be extracted to. + RootTmpDir string // DefaultUserConfigsDir is the default Agent configuration directory DefaultUserConfigsDir string + // StableInstallerPath is the path to the stable installer binary. 
+ StableInstallerPath string ) func init() { @@ -32,5 +35,8 @@ func init() { PackagesPath = filepath.Join(datadogInstallerData, "packages") ConfigsPath = filepath.Join(datadogInstallerData, "configs") LocksPath = filepath.Join(datadogInstallerData, "locks") + RootTmpDir = filepath.Join(datadogInstallerData, "tmp") + datadogInstallerPath := "C:\\Program Files\\Datadog\\Datadog Installer" + StableInstallerPath = filepath.Join(datadogInstallerPath, "datadog-installer.exe") DefaultUserConfigsDir, _ = windows.KnownFolderPath(windows.FOLDERID_ProgramData, 0) } diff --git a/pkg/fleet/internal/winregistry/winregistry.go b/pkg/fleet/internal/winregistry/winregistry.go index 3b7b3bdb8e91e..68577cc5832b3 100644 --- a/pkg/fleet/internal/winregistry/winregistry.go +++ b/pkg/fleet/internal/winregistry/winregistry.go @@ -9,6 +9,7 @@ package winregistry import ( + "fmt" "golang.org/x/sys/windows" "golang.org/x/sys/windows/registry" "path/filepath" @@ -42,3 +43,28 @@ func GetProgramDataDirForProduct(product string) (path string, err error) { path = val return } + +// GetAgentUserName returns the user name for the Agent, stored in the registry by the Installer MSI +func GetAgentUserName() (string, error) { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, "SOFTWARE\\Datadog\\Datadog Installer", registry.QUERY_VALUE) + if err != nil { + return "", err + } + defer k.Close() + + user, _, err := k.GetStringValue("installedUser") + if err != nil { + return "", fmt.Errorf("could not read installedUser in registry: %w", err) + } + + domain, _, err := k.GetStringValue("installedDomain") + if err != nil { + return "", fmt.Errorf("could not read installedDomain in registry: %w", err) + } + + if domain != "" { + user = domain + `\` + user + } + + return user, nil +} diff --git a/pkg/gohai/filesystem/filesystem_nix.go b/pkg/gohai/filesystem/filesystem_nix.go index f25f6adc97d3e..27f5094d1e282 100644 --- a/pkg/gohai/filesystem/filesystem_nix.go +++ b/pkg/gohai/filesystem/filesystem_nix.go @@ -74,9 +74,7 @@ func replaceDev(oldMount, newMount MountInfo) bool { } // getFileSystemInfoWithMounts is an internal method to help testing with test mounts and mocking syscalls -func getFileSystemInfoWithMounts(initialMounts []*mountinfo.Info, sizeKB, dev fsInfoGetter) ([]MountInfo, error) { - mounts := initialMounts - +func getFileSystemInfoWithMounts(mounts []*mountinfo.Info, sizeKB, dev fsInfoGetter) ([]MountInfo, error) { devMountInfos := map[uint64]MountInfo{} for _, mount := range mounts { // Skip mounts that seem to be missing data diff --git a/pkg/gohai/filesystem/filesystem_nix_test.go b/pkg/gohai/filesystem/filesystem_nix_test.go index c41b7c369110d..55a7c92563370 100644 --- a/pkg/gohai/filesystem/filesystem_nix_test.go +++ b/pkg/gohai/filesystem/filesystem_nix_test.go @@ -117,7 +117,6 @@ func TestNixFSTypeFiltering(t *testing.T) { mounts, err := getFileSystemInfoWithMounts(inputMounts, mockFSSizeKB, getMockFSDev()) require.NoError(t, err) - require.Equal(t, len(expectedMounts), len(mounts)) assert.ElementsMatch(t, mounts, expectedMounts) }) } @@ -260,8 +259,9 @@ func TestFilterDev(t *testing.T) { func newTestInputMountinfo(name string) *mountinfo.Info { return &mountinfo.Info{ + // add suffixes to the name to avoid having an ignored source / type / mountpoint Source: name + "Source", - FSType: name, + FSType: name + "Type", Mountpoint: name + "MountPoint", } } diff --git a/pkg/internaltelemetry/client.go b/pkg/internaltelemetry/client.go index bb8b0569c501b..f9675ab906780 100644 --- a/pkg/internaltelemetry/client.go 
+++ b/pkg/internaltelemetry/client.go @@ -20,11 +20,11 @@ import ( "go.uber.org/atomic" - metadatautils "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/utils" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/version" + "github.com/shirou/gopsutil/v3/host" ) const ( @@ -115,7 +115,11 @@ type httpClient interface { // NewClient creates a new telemetry client func NewClient(httpClient httpClient, endpoints []*config.Endpoint, service string, debug bool) Client { - info := metadatautils.GetInformation() + info, err := host.Info() + if err != nil { + log.Errorf("failed to retrieve host info: %v", err) + info = &host.InfoStat{} + } return &client{ client: httpClient, endpoints: endpoints, diff --git a/pkg/jmxfetch/jmxfetch.go b/pkg/jmxfetch/jmxfetch.go index 2f0fb0537beca..1a65b1acb5886 100644 --- a/pkg/jmxfetch/jmxfetch.go +++ b/pkg/jmxfetch/jmxfetch.go @@ -25,7 +25,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" api "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/status/health" jmxStatus "github.com/DataDog/datadog-agent/pkg/status/jmx" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -121,7 +121,7 @@ func NewJMXFetch(logger jmxlogger.Component) *JMXFetch { // Monitor monitors this JMXFetch instance, waiting for JMX to stop. Gracefully handles restarting the JMXFetch process. func (j *JMXFetch) Monitor() { - limiter := newRestartLimiter(config.Datadog().GetInt("jmx_max_restarts"), float64(config.Datadog().GetInt("jmx_restart_interval"))) + limiter := newRestartLimiter(pkgconfigsetup.Datadog().GetInt("jmx_max_restarts"), float64(pkgconfigsetup.Datadog().GetInt("jmx_restart_interval"))) ticker := time.NewTicker(500 * time.Millisecond) defer ticker.Stop() @@ -192,7 +192,7 @@ func (j *JMXFetch) Start(manage bool) error { classpath = fmt.Sprintf("%s%s%s", j.JavaToolsJarPath, string(os.PathListSeparator), classpath) } - globalCustomJars := config.Datadog().GetStringSlice("jmx_custom_jars") + globalCustomJars := pkgconfigsetup.Datadog().GetStringSlice("jmx_custom_jars") if len(globalCustomJars) > 0 { classpath = fmt.Sprintf("%s%s%s", strings.Join(globalCustomJars, string(os.PathListSeparator)), string(os.PathListSeparator), classpath) } @@ -209,13 +209,13 @@ func (j *JMXFetch) Start(manage bool) error { reporter = "json" default: if j.DSD != nil && j.DSD.UdsListenerRunning() { - reporter = fmt.Sprintf("statsd:unix://%s", config.Datadog().GetString("dogstatsd_socket")) + reporter = fmt.Sprintf("statsd:unix://%s", pkgconfigsetup.Datadog().GetString("dogstatsd_socket")) } else { - bindHost := config.GetBindHost() + bindHost := pkgconfigsetup.GetBindHost(pkgconfigsetup.Datadog()) if bindHost == "" || bindHost == "0.0.0.0" { bindHost = "localhost" } - reporter = fmt.Sprintf("statsd:%s:%s", bindHost, config.Datadog().GetString("dogstatsd_port")) + reporter = fmt.Sprintf("statsd:%s:%s", bindHost, pkgconfigsetup.Datadog().GetString("dogstatsd_port")) } } @@ -226,14 +226,14 @@ func (j *JMXFetch) Start(manage bool) error { // Specify a maximum memory allocation pool for the JVM javaOptions := j.JavaOptions - useContainerSupport := config.Datadog().GetBool("jmx_use_container_support") - 
useCgroupMemoryLimit := config.Datadog().GetBool("jmx_use_cgroup_memory_limit") + useContainerSupport := pkgconfigsetup.Datadog().GetBool("jmx_use_container_support") + useCgroupMemoryLimit := pkgconfigsetup.Datadog().GetBool("jmx_use_cgroup_memory_limit") if useContainerSupport && useCgroupMemoryLimit { return fmt.Errorf("incompatible options %q and %q", jvmContainerSupport, jvmCgroupMemoryAwareness) } else if useContainerSupport { javaOptions += jvmContainerSupport - maxHeapSizeAsPercentRAM := config.Datadog().GetFloat64("jmx_max_ram_percentage") + maxHeapSizeAsPercentRAM := pkgconfigsetup.Datadog().GetFloat64("jmx_max_ram_percentage") passOption := true // These options overwrite the -XX:MaxRAMPercentage option, log a warning if they are found in the javaOptions if strings.Contains(javaOptions, "Xmx") || strings.Contains(javaOptions, "XX:MaxHeapSize") { @@ -278,11 +278,11 @@ func (j *JMXFetch) Start(manage bool) error { jmxLogLevel = "INFO" } - ipcHost, err := config.GetIPCAddress() + ipcHost, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } - ipcPort := config.Datadog().GetInt("cmd_port") + ipcPort := pkgconfigsetup.Datadog().GetInt("cmd_port") if j.IPCHost != "" { ipcHost = j.IPCHost } @@ -296,37 +296,37 @@ func (j *JMXFetch) Start(manage bool) error { jmxMainClass, "--ipc_host", ipcHost, "--ipc_port", fmt.Sprintf("%v", ipcPort), - "--check_period", fmt.Sprintf("%v", config.Datadog().GetInt("jmx_check_period")), // Period of the main loop of jmxfetch in ms - "--thread_pool_size", fmt.Sprintf("%v", config.Datadog().GetInt("jmx_thread_pool_size")), // Size for the JMXFetch thread pool - "--collection_timeout", fmt.Sprintf("%v", config.Datadog().GetInt("jmx_collection_timeout")), // Timeout for metric collection in seconds - "--reconnection_timeout", fmt.Sprintf("%v", config.Datadog().GetInt("jmx_reconnection_timeout")), // Timeout for instance reconnection in seconds - "--reconnection_thread_pool_size", fmt.Sprintf("%v", config.Datadog().GetInt("jmx_reconnection_thread_pool_size")), // Size for the JMXFetch reconnection thread pool + "--check_period", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_check_period")), // Period of the main loop of jmxfetch in ms + "--thread_pool_size", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_thread_pool_size")), // Size for the JMXFetch thread pool + "--collection_timeout", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_collection_timeout")), // Timeout for metric collection in seconds + "--reconnection_timeout", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_reconnection_timeout")), // Timeout for instance reconnection in seconds + "--reconnection_thread_pool_size", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_reconnection_thread_pool_size")), // Size for the JMXFetch reconnection thread pool "--log_level", jmxLogLevel, "--reporter", reporter, // Reporter to use - "--statsd_queue_size", fmt.Sprintf("%v", config.Datadog().GetInt("jmx_statsd_client_queue_size")), // Dogstatsd client queue size to use + "--statsd_queue_size", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_statsd_client_queue_size")), // Dogstatsd client queue size to use ) - if config.Datadog().GetBool("jmx_statsd_telemetry_enabled") { + if pkgconfigsetup.Datadog().GetBool("jmx_statsd_telemetry_enabled") { subprocessArgs = append(subprocessArgs, "--statsd_telemetry") } - if config.Datadog().GetBool("jmx_telemetry_enabled") { + if pkgconfigsetup.Datadog().GetBool("jmx_telemetry_enabled") { 
subprocessArgs = append(subprocessArgs, "--jmxfetch_telemetry") } - if config.Datadog().GetBool("jmx_statsd_client_use_non_blocking") { + if pkgconfigsetup.Datadog().GetBool("jmx_statsd_client_use_non_blocking") { subprocessArgs = append(subprocessArgs, "--statsd_nonblocking") } - if bufSize := config.Datadog().GetInt("jmx_statsd_client_buffer_size"); bufSize != 0 { + if bufSize := pkgconfigsetup.Datadog().GetInt("jmx_statsd_client_buffer_size"); bufSize != 0 { subprocessArgs = append(subprocessArgs, "--statsd_buffer_size", fmt.Sprintf("%d", bufSize)) } - if socketTimeout := config.Datadog().GetInt("jmx_statsd_client_socket_timeout"); socketTimeout != 0 { + if socketTimeout := pkgconfigsetup.Datadog().GetInt("jmx_statsd_client_socket_timeout"); socketTimeout != 0 { subprocessArgs = append(subprocessArgs, "--statsd_socket_timeout", fmt.Sprintf("%d", socketTimeout)) } - if config.Datadog().GetBool("log_format_rfc3339") { + if pkgconfigsetup.Datadog().GetBool("log_format_rfc3339") { subprocessArgs = append(subprocessArgs, "--log_format_rfc3339") } diff --git a/pkg/jmxfetch/runner.go b/pkg/jmxfetch/runner.go index 7f53e9b571762..9717edfeb262e 100644 --- a/pkg/jmxfetch/runner.go +++ b/pkg/jmxfetch/runner.go @@ -13,7 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/agent/jmxlogger" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" jmxStatus "github.com/DataDog/datadog-agent/pkg/status/jmx" ) @@ -24,7 +24,7 @@ type runner struct { func (r *runner) initRunner(server dogstatsdServer.Component, logger jmxlogger.Component) { r.jmxfetch = NewJMXFetch(logger) - r.jmxfetch.LogLevel = config.Datadog().GetString("log_level") + r.jmxfetch.LogLevel = pkgconfigsetup.Datadog().GetString("log_level") r.jmxfetch.DSD = server } diff --git a/pkg/kubestatemetrics/builder/builder.go b/pkg/kubestatemetrics/builder/builder.go index 148318bc1d6fb..56fa8ff3e0c19 100644 --- a/pkg/kubestatemetrics/builder/builder.go +++ b/pkg/kubestatemetrics/builder/builder.go @@ -50,6 +50,7 @@ type Builder struct { collectPodsFromKubelet bool collectOnlyUnassignedPods bool + KubeletReflector *kubeletReflector } // New returns new Builder instance @@ -161,7 +162,17 @@ func (b *Builder) Build() metricsstore.MetricsWriterList { // BuildStores initializes and registers all enabled stores. // It returns metric cache stores. func (b *Builder) BuildStores() [][]cache.Store { - return b.ksmBuilder.BuildStores() + stores := b.ksmBuilder.BuildStores() + + if b.KubeletReflector != nil { + // Starting the reflector here allows us to start just one for all stores. 
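The comment above captures the design change in this file: a single kubeletReflector pulls pod changes on a timer and fans each update out to every registered store, instead of one pod watcher per store. A simplified, self-contained sketch of that fan-out loop, with a stand-in Store interface in place of client-go's cache.Store:

package main

import (
	"context"
	"fmt"
	"time"
)

// Store is a stand-in for client-go's cache.Store: anything that accepts objects.
type Store interface {
	Add(obj interface{}) error
}

type printStore struct{ name string }

func (s printStore) Add(obj interface{}) error {
	fmt.Printf("%s <- %v\n", s.name, obj)
	return nil
}

// reflector pulls changes once per tick and pushes them to every store.
type reflector struct {
	stores []Store
	pull   func() []interface{}
}

func (r *reflector) start(ctx context.Context, period time.Duration) {
	ticker := time.NewTicker(period)
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				for _, obj := range r.pull() {
					for _, s := range r.stores {
						// Errors are logged and skipped in the real code so one
						// failing store does not block the others.
						_ = s.Add(obj)
					}
				}
			case <-ctx.Done():
				return
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	r := &reflector{
		stores: []Store{printStore{"pods"}, printStore{"pods_extended"}},
		pull:   func() []interface{} { return []interface{}{"pod-a"} },
	}
	r.start(ctx, 500*time.Millisecond)
	<-ctx.Done()
}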
+ err := b.KubeletReflector.start(b.ctx) + if err != nil { + log.Errorf("Failed to start the kubelet reflector: %s", err) + } + } + + return stores } // WithResync is used if a resync period is configured @@ -302,7 +313,22 @@ func (c *cacheEnabledListerWatcher) List(options v1.ListOptions) (runtime.Object func handlePodCollection[T any](b *Builder, store cache.Store, client T, listWatchFunc func(kubeClient T, ns string, fieldSelector string) cache.ListerWatcher, namespace string, useAPIServerCache bool) { if b.collectPodsFromKubelet { - b.startKubeletPodWatcher(store, namespace) + if b.KubeletReflector == nil { + kr, err := newKubeletReflector(b.namespaces) + if err != nil { + log.Errorf("Failed to create kubeletReflector: %s", err) + return + } + b.KubeletReflector = &kr + } + + err := b.KubeletReflector.addStore(store) + if err != nil { + log.Errorf("Failed to add store to kubeletReflector: %s", err) + return + } + + // The kubelet reflector will be started when all stores are added. return } diff --git a/pkg/kubestatemetrics/builder/kubelet_pods.go b/pkg/kubestatemetrics/builder/kubelet_pods.go index ce7af8ce6683c..c0a50018c110a 100644 --- a/pkg/kubestatemetrics/builder/kubelet_pods.go +++ b/pkg/kubestatemetrics/builder/kubelet_pods.go @@ -10,6 +10,7 @@ package builder import ( "context" "fmt" + "slices" "strings" "time" @@ -22,57 +23,107 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) -// PodWatcher is an interface for a component that watches for changes in pods -type PodWatcher interface { +const ( + podWatcherExpiryDuration = 15 * time.Second + updateStoresPeriod = 5 * time.Second +) + +// podWatcher is an interface for a component that watches for changes in pods +type podWatcher interface { PullChanges(ctx context.Context) ([]*kubelet.Pod, error) Expire() ([]string, error) } -func (b *Builder) startKubeletPodWatcher(store cache.Store, namespace string) { - podWatcher, err := kubelet.NewPodWatcher(15 * time.Second) +type kubeletReflector struct { + namespaces []string + watchAllNamespaces bool + podWatcher podWatcher + + // Having an array of stores allows us to have a single watcher for all the + // collectors configured (by default it's the pods one plus "pods_extended") + stores []cache.Store + + started bool +} + +func newKubeletReflector(namespaces []string) (kubeletReflector, error) { + watcher, err := kubelet.NewPodWatcher(podWatcherExpiryDuration) if err != nil { - log.Warnf("Failed to create pod watcher: %s", err) + return kubeletReflector{}, fmt.Errorf("failed to create kubelet-based reflector: %w", err) + } + + watchAllNamespaces := slices.Contains(namespaces, corev1.NamespaceAll) + + return kubeletReflector{ + namespaces: namespaces, + watchAllNamespaces: watchAllNamespaces, + podWatcher: watcher, + }, nil +} + +func (kr *kubeletReflector) addStore(store cache.Store) error { + if kr.started { + return fmt.Errorf("cannot add store after reflector has started") } - ticker := time.NewTicker(5 * time.Second) + kr.stores = append(kr.stores, store) + + return nil +} + +// start starts the reflector. 
It should be called only once after all the +// stores have been added +func (kr *kubeletReflector) start(context context.Context) error { + if kr.started { + return fmt.Errorf("reflector already started") + } + + kr.started = true + + ticker := time.NewTicker(updateStoresPeriod) go func() { for { select { case <-ticker.C: - err = updateStore(b.ctx, store, podWatcher, namespace) + err := kr.updateStores(context) if err != nil { - log.Errorf("Failed to update store: %s", err) + log.Errorf("Failed to update stores: %s", err) } - case <-b.ctx.Done(): + case <-context.Done(): ticker.Stop() return } } }() + + return nil } -func updateStore(ctx context.Context, store cache.Store, podWatcher PodWatcher, namespace string) error { - pods, err := podWatcher.PullChanges(ctx) +func (kr *kubeletReflector) updateStores(ctx context.Context) error { + pods, err := kr.podWatcher.PullChanges(ctx) if err != nil { return fmt.Errorf("failed to pull changes from pod watcher: %w", err) } for _, pod := range pods { - if namespace != corev1.NamespaceAll && pod.Metadata.Namespace != namespace { + if !kr.watchAllNamespaces && !slices.Contains(kr.namespaces, pod.Metadata.Namespace) { continue } kubePod := kubelet.ConvertKubeletPodToK8sPod(pod) - err = store.Add(kubePod) - if err != nil { - log.Warnf("Failed to add pod to KSM store: %s", err) + for _, store := range kr.stores { + err := store.Add(kubePod) + if err != nil { + // log instead of returning error to continue updating other stores + log.Warnf("Failed to add pod to store: %s", err) + } } } - expiredEntities, err := podWatcher.Expire() + expiredEntities, err := kr.podWatcher.Expire() if err != nil { return fmt.Errorf("failed to expire pods: %w", err) } @@ -91,9 +142,12 @@ func updateStore(ctx context.Context, store cache.Store, podWatcher PodWatcher, }, } - err = store.Delete(&expiredPod) - if err != nil { - log.Warnf("Failed to delete pod from KSM store: %s", err) + for _, store := range kr.stores { + err := store.Delete(&expiredPod) + if err != nil { + // log instead of returning error to continue updating other stores + log.Warnf("Failed to delete pod from store: %s", err) + } } } diff --git a/pkg/kubestatemetrics/builder/kubelet_pods_stub.go b/pkg/kubestatemetrics/builder/kubelet_pods_stub.go index b4da17ab6227d..7682655232056 100644 --- a/pkg/kubestatemetrics/builder/kubelet_pods_stub.go +++ b/pkg/kubestatemetrics/builder/kubelet_pods_stub.go @@ -8,9 +8,24 @@ package builder import ( + "context" + "k8s.io/client-go/tools/cache" ) -func (b *Builder) startKubeletPodWatcher(_ cache.Store, _ string) { - // Do nothing +// When the Kubelet flag is not set, we don't need a kubeletReflector, so we can +// return a struct that does nothing + +type kubeletReflector struct{} + +func newKubeletReflector(_ []string) (kubeletReflector, error) { + return kubeletReflector{}, nil +} + +func (kr *kubeletReflector) addStore(_ cache.Store) error { + return nil +} + +func (kr *kubeletReflector) start(_ context.Context) error { + return nil } diff --git a/pkg/kubestatemetrics/builder/kubelet_pods_test.go b/pkg/kubestatemetrics/builder/kubelet_pods_test.go index 94f5f26a798ee..a9020b2143549 100644 --- a/pkg/kubestatemetrics/builder/kubelet_pods_test.go +++ b/pkg/kubestatemetrics/builder/kubelet_pods_test.go @@ -9,10 +9,11 @@ package builder import ( "context" + "slices" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/types" @@ -83,83 +84,140 @@ func (m *MockStore) Resync() error { return nil } -func TestUpdateStore_AddPodToStore(t *testing.T) { - store := new(MockStore) - podWatcher := new(MockPodWatcher) - - kubeletPod := &kubelet.Pod{ - Metadata: kubelet.PodMetadata{ - Name: "test-pod", - Namespace: "default", - UID: "12345", +func TestUpdateStores_AddPods(t *testing.T) { + tests := []struct { + name string + reflectorNamespaces []string + addedPodNamespace string + podShouldBeAdded bool + }{ + { + name: "add pod in watched namespace", + reflectorNamespaces: []string{"default"}, + addedPodNamespace: "default", + podShouldBeAdded: true, }, - } - - kubernetesPod := kubelet.ConvertKubeletPodToK8sPod(kubeletPod) - - podWatcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{kubeletPod}, nil) - podWatcher.On("Expire").Return([]string{}, nil) - store.On("Add", kubernetesPod).Return(nil) - - err := updateStore(context.TODO(), store, podWatcher, "default") - assert.NoError(t, err) - - store.AssertCalled(t, "Add", kubernetesPod) -} - -func TestUpdateStore_FilterPodsByNamespace(t *testing.T) { - store := new(MockStore) - podWatcher := new(MockPodWatcher) - - kubeletPod := &kubelet.Pod{ - Metadata: kubelet.PodMetadata{ - Name: "test-pod", - Namespace: "other-namespace", - UID: "12345", + { + name: "add pod in non-watched namespace", + reflectorNamespaces: []string{"default"}, + addedPodNamespace: "other-namespace", + podShouldBeAdded: false, + }, + { + name: "reflector watches all pods", + reflectorNamespaces: []string{corev1.NamespaceAll}, + addedPodNamespace: "default", + podShouldBeAdded: true, }, } - store.On("Add", mock.Anything).Return(nil) - podWatcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{kubeletPod}, nil) - podWatcher.On("Expire").Return([]string{}, nil) - - err := updateStore(context.TODO(), store, podWatcher, "default") - assert.NoError(t, err) - - // Add() shouldn't be called because the pod is in a different namespace - store.AssertNotCalled(t, "Add", mock.Anything) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + stores := []*MockStore{ + new(MockStore), + new(MockStore), + } + for _, store := range stores { + store.On("Add", mock.Anything).Return(nil) + } + + watcher := new(MockPodWatcher) + + kubeletPod := &kubelet.Pod{ + Metadata: kubelet.PodMetadata{ + Namespace: test.addedPodNamespace, + Name: "test-pod", + UID: "12345", + }, + } + + kubernetesPod := kubelet.ConvertKubeletPodToK8sPod(kubeletPod) + + watcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{kubeletPod}, nil) + watcher.On("Expire").Return([]string{}, nil) + + reflector := kubeletReflector{ + namespaces: test.reflectorNamespaces, + watchAllNamespaces: slices.Contains(test.reflectorNamespaces, corev1.NamespaceAll), + podWatcher: watcher, + } + + for _, store := range stores { + err := reflector.addStore(store) + require.NoError(t, err) + } + + err := reflector.updateStores(context.TODO()) + require.NoError(t, err) + + if test.podShouldBeAdded { + for _, store := range stores { + store.AssertCalled(t, "Add", kubernetesPod) + } + } else { + for _, store := range stores { + store.AssertNotCalled(t, "Add", mock.Anything) + } + } + }) + } } -func TestUpdateStore_HandleExpiredPods(t *testing.T) { - store := new(MockStore) - podWatcher := new(MockPodWatcher) - podUID := "kubernetes_pod://pod-12345" - kubernetesPod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - UID: types.UID("pod-12345"), +func TestUpdateStores_HandleExpired(t *testing.T) { + tests := 
[]struct { + name string + expiredUID string + expectedPodToBeDeleted *corev1.Pod + }{ + { + name: "expired pod", + expiredUID: "kubernetes_pod://pod-12345", + expectedPodToBeDeleted: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + UID: types.UID("pod-12345"), + }, + }, + }, + { + name: "expired container", + expiredUID: "container-12345", + expectedPodToBeDeleted: nil, }, } - podWatcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{}, nil) - podWatcher.On("Expire").Return([]string{podUID}, nil) - store.On("Delete", &kubernetesPod).Return(nil) - - err := updateStore(context.TODO(), store, podWatcher, "default") - assert.NoError(t, err) - - store.AssertCalled(t, "Delete", &kubernetesPod) -} - -func TestUpdateStore_HandleExpiredContainers(t *testing.T) { - store := new(MockStore) - podWatcher := new(MockPodWatcher) - - podWatcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{}, nil) - podWatcher.On("Expire").Return([]string{"container-12345"}, nil) - - err := updateStore(context.TODO(), store, podWatcher, "default") - assert.NoError(t, err) - - // Delete() shouldn't be called because the expired entity is not a pod - store.AssertNotCalled(t, "Delete", mock.Anything) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + stores := []*MockStore{ + new(MockStore), + new(MockStore), + } + for _, store := range stores { + store.On("Delete", mock.Anything).Return(nil) + } + + watcher := new(MockPodWatcher) + watcher.On("PullChanges", mock.Anything).Return([]*kubelet.Pod{}, nil) + watcher.On("Expire").Return([]string{test.expiredUID}, nil) + + reflector := kubeletReflector{ + namespaces: []string{"default"}, + podWatcher: watcher, + } + for _, store := range stores { + err := reflector.addStore(store) + require.NoError(t, err) + } + + err := reflector.updateStores(context.TODO()) + require.NoError(t, err) + + for _, store := range stores { + if test.expectedPodToBeDeleted != nil { + store.AssertCalled(t, "Delete", test.expectedPodToBeDeleted) + } else { + store.AssertNotCalled(t, "Delete", mock.Anything) + } + } + }) + } } diff --git a/pkg/languagedetection/detector.go b/pkg/languagedetection/detector.go index 2ba7e681a3113..92ab7fe4cc4c5 100644 --- a/pkg/languagedetection/detector.go +++ b/pkg/languagedetection/detector.go @@ -12,7 +12,7 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/languagedetection/internal/detectors" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" "github.com/DataDog/datadog-agent/pkg/process/net" @@ -92,7 +92,7 @@ var ( ) // DetectLanguage uses a combination of commandline parsing and binary analysis to detect a process' language -func DetectLanguage(procs []languagemodels.Process, sysprobeConfig config.Reader) []*languagemodels.Language { +func DetectLanguage(procs []languagemodels.Process, sysprobeConfig model.Reader) []*languagemodels.Language { detectLanguageStart := time.Now() defer func() { detectLanguageRuntimeMs.Observe(float64(time.Since(detectLanguageStart).Milliseconds())) @@ -161,7 +161,7 @@ func DetectLanguage(procs []languagemodels.Process, sysprobeConfig config.Reader return langs } -func privilegedLanguageDetectionEnabled(sysProbeConfig config.Reader) bool { +func privilegedLanguageDetectionEnabled(sysProbeConfig model.Reader) bool { if sysProbeConfig == nil { return false } diff --git a/pkg/languagedetection/detector_linux_test.go b/pkg/languagedetection/detector_linux_test.go 
index d38572df36c1f..bd6ad87f808ae 100644 --- a/pkg/languagedetection/detector_linux_test.go +++ b/pkg/languagedetection/detector_linux_test.go @@ -29,7 +29,7 @@ func startTestUnixServer(t *testing.T, handler http.Handler) string { t.Helper() socketPath := path.Join(t.TempDir(), "test.sock") - listener, err := net.NewListener(socketPath) + listener, err := net.NewSystemProbeListener(socketPath) require.NoError(t, err) t.Cleanup(listener.Stop) diff --git a/pkg/languagedetection/internal/detectors/dotnet_detector.go b/pkg/languagedetection/internal/detectors/dotnet_detector.go new file mode 100644 index 0000000000000..c3a8c7403d88c --- /dev/null +++ b/pkg/languagedetection/internal/detectors/dotnet_detector.go @@ -0,0 +1,84 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux + +package detectors + +import ( + "bufio" + "fmt" + "io" + "os" + "path" + "strconv" + "strings" + + dderrors "github.com/DataDog/datadog-agent/pkg/errors" + "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" + "github.com/DataDog/datadog-agent/pkg/util/kernel" +) + +const runtimeDll = "/System.Runtime.dll" + +var errorDllNotFound = dderrors.NewNotFound(runtimeDll) + +// DotnetDetector detects .NET processes. +type DotnetDetector struct { + hostProc string +} + +// NewDotnetDetector creates a new instance of DotnetDetector. +func NewDotnetDetector() DotnetDetector { + return DotnetDetector{hostProc: kernel.ProcFSRoot()} +} + +// mapsHasDotnetDll checks if the maps file includes a path with the .NET +// runtime DLL. +func mapsHasDotnetDll(reader io.Reader) (bool, error) { + scanner := bufio.NewScanner(bufio.NewReader(reader)) + + for scanner.Scan() { + line := scanner.Text() + + if strings.HasSuffix(line, runtimeDll) { + return true, nil + } + } + + return false, scanner.Err() +} + +func (d DotnetDetector) getMapsPath(pid int32) string { + return path.Join(d.hostProc, strconv.FormatInt(int64(pid), 10), "maps") +} + +// DetectLanguage detects if a process is a .NET process. It does this by using +// /proc/PID/maps to check if the process has mapped a standard .NET dll. This +// works for non-single-file deployments (both self-contained and +// framework-dependent), and framework-dependent single-file deployments. +// +// It does not work for self-contained single-file deployments since these do +// not have any DLLs in their maps file. 
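The doc comment above sums up the heuristic: a .NET process (other than a self-contained single-file deployment) maps System.Runtime.dll, so scanning /proc/<pid>/maps for that suffix is enough. A rough standard-library-only sketch of the same check; the real detector resolves the host proc root via kernel.ProcFSRoot() and returns a typed not-found error instead of a bool:

package main

import (
	"bufio"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

const runtimeDll = "/System.Runtime.dll"

// pidMapsDotnet reports whether /proc/<pid>/maps contains a mapping whose
// path ends with the .NET runtime DLL.
func pidMapsDotnet(pid int) (bool, error) {
	f, err := os.Open(filepath.Join("/proc", strconv.Itoa(pid), "maps"))
	if err != nil {
		return false, err
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		if strings.HasSuffix(scanner.Text(), runtimeDll) {
			return true, nil
		}
	}
	return false, scanner.Err()
}

func main() {
	// The current process is a Go binary, so this should print "false <nil>" on Linux.
	ok, err := pidMapsDotnet(os.Getpid())
	fmt.Println(ok, err)
}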
+func (d DotnetDetector) DetectLanguage(process languagemodels.Process) (languagemodels.Language, error) { + path := d.getMapsPath(process.GetPid()) + mapsFile, err := os.Open(path) + if err != nil { + return languagemodels.Language{}, fmt.Errorf("open: %v", err) + } + defer mapsFile.Close() + + hasDLL, err := mapsHasDotnetDll(mapsFile) + if err != nil { + return languagemodels.Language{}, err + } + if !hasDLL { + return languagemodels.Language{}, errorDllNotFound + } + + return languagemodels.Language{ + Name: languagemodels.Dotnet, + }, nil +} diff --git a/pkg/languagedetection/internal/detectors/dotnet_detector_test.go b/pkg/languagedetection/internal/detectors/dotnet_detector_test.go new file mode 100644 index 0000000000000..78b7bf146e619 --- /dev/null +++ b/pkg/languagedetection/internal/detectors/dotnet_detector_test.go @@ -0,0 +1,79 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux + +package detectors + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" + "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" + fileopener "github.com/DataDog/datadog-agent/pkg/network/usm/sharedlibraries/testutil" + "github.com/DataDog/datadog-agent/pkg/proto/pbgo/languagedetection" +) + +func TestDotnetMapsParser(t *testing.T) { + data := []struct { + name string + maps string + result bool + }{ + { + name: "empty maps", + maps: "", + }, + { + name: "not in maps", + maps: ` +79f6cd47d000-79f6cd47f000 r--p 00000000 fc:04 793163 /usr/lib/python3.10/lib-dynload/_bz2.cpython-310-x86_64-linux-gnu.so +79f6cd479000-79f6cd47a000 r-xp 00001000 fc:06 5507018 /home/foo/.local/lib/python3.10/site-packages/ddtrace_fake/md.cpython-310-x86_64-linux-gnu.so + `, + result: false, + }, + { + name: "in maps", + maps: ` +7d97b4e57000-7d97b4e85000 r--s 00000000 fc:04 1332568 /usr/lib/dotnet/shared/Microsoft.NETCore.App/8.0.8/System.Con +sole.dll +7d97b4e85000-7d97b4e8e000 r--s 00000000 fc:04 1332665 /usr/lib/dotnet/shared/Microsoft.NETCore.App/8.0.8/System.Runtime.dll +7d97b4e8e000-7d97b4e99000 r--p 00000000 fc:04 1332718 /usr/lib/dotnet/shared/Microsoft.NETCore.App/8.0.8/libSystem.Native.so + `, + result: true, + }, + } + for _, d := range data { + t.Run(d.name, func(t *testing.T) { + result, err := mapsHasDotnetDll(strings.NewReader(d.maps)) + assert.NoError(t, err) + assert.Equal(t, d.result, result) + }) + } +} + +func TestDotnetDetector(t *testing.T) { + curDir, err := testutil.CurDir() + require.NoError(t, err) + + dll := filepath.Join(curDir, "testdata", "System.Runtime.dll") + cmd, err := fileopener.OpenFromAnotherProcess(t, dll) + require.NoError(t, err) + + proc := &languagedetection.Process{Pid: int32(cmd.Process.Pid)} + langInfo, err := NewDotnetDetector().DetectLanguage(proc) + require.NoError(t, err) + assert.Equal(t, languagemodels.Dotnet, langInfo.Name) + + self := &languagedetection.Process{Pid: int32(os.Getpid())} + _, err = NewDotnetDetector().DetectLanguage(self) + require.Error(t, err) +} diff --git a/pkg/languagedetection/internal/detectors/testdata/System.Runtime.dll b/pkg/languagedetection/internal/detectors/testdata/System.Runtime.dll new file mode 100644 index 0000000000000..421376db9e8ae --- 
/dev/null +++ b/pkg/languagedetection/internal/detectors/testdata/System.Runtime.dll @@ -0,0 +1 @@ +dummy diff --git a/pkg/languagedetection/privileged/privileged_detector.go b/pkg/languagedetection/privileged/privileged_detector.go index cf050a77676f5..b6447ea310c14 100644 --- a/pkg/languagedetection/privileged/privileged_detector.go +++ b/pkg/languagedetection/privileged/privileged_detector.go @@ -30,6 +30,7 @@ import ( var detectorsWithPrivilege = []languagemodels.Detector{ detectors.NewGoDetector(), + detectors.NewDotnetDetector(), } var ( @@ -97,6 +98,7 @@ func (l *LanguageDetector) DetectWithPrivileges(procs []languagemodels.Process) continue } languages[i] = lang + break } l.mux.Lock() l.binaryIDCache.Add(bin, lang) diff --git a/pkg/languagedetection/privileged/privileged_detector_test.go b/pkg/languagedetection/privileged/privileged_detector_test.go index aba97bf96595c..485a02bbe5ec9 100644 --- a/pkg/languagedetection/privileged/privileged_detector_test.go +++ b/pkg/languagedetection/privileged/privileged_detector_test.go @@ -8,6 +8,7 @@ package privileged import ( + "errors" "os" "os/exec" "path/filepath" @@ -149,3 +150,74 @@ func TestShortLivingProc(t *testing.T) { require.Equal(t, languagemodels.Language{}, res[0]) require.Zero(t, d.binaryIDCache.Len()) } + +// DummyDetector is a detector used for testing +type DummyDetector struct { + language languagemodels.LanguageName +} + +// DummyProcess is a process used for testing +type DummyProcess struct{} + +// GetPid is unused +func (p DummyProcess) GetPid() int32 { + return int32(os.Getpid()) +} + +// GetCommand is unused +func (p DummyProcess) GetCommand() string { + return "dummy" +} + +// GetCmdline is unused +func (p DummyProcess) GetCmdline() []string { + return []string{"dummy"} +} + +// DetectLanguage "detects" a dummy language for testing +func (d DummyDetector) DetectLanguage(_ languagemodels.Process) (languagemodels.Language, error) { + if d.language == languagemodels.Unknown { + return languagemodels.Language{}, errors.New("unable to detect") + } + + return languagemodels.Language{Name: languagemodels.LanguageName(d.language)}, nil +} + +func TestDetectorOrder(t *testing.T) { + for _, test := range []struct { + name string + detectors []languagemodels.Detector + language languagemodels.LanguageName + }{ + { + name: "stop at first good", + detectors: []languagemodels.Detector{ + DummyDetector{languagemodels.Java}, + DummyDetector{languagemodels.Python}}, + language: languagemodels.Java, + }, + { + name: "try second if first fails", + detectors: []languagemodels.Detector{ + DummyDetector{}, + DummyDetector{languagemodels.Python}}, + language: languagemodels.Python, + }, + { + name: "all fail", + detectors: []languagemodels.Detector{ + DummyDetector{}, + DummyDetector{}}, + language: languagemodels.Unknown, + }, + } { + t.Run(test.name, func(t *testing.T) { + MockPrivilegedDetectors(t, test.detectors) + d := NewLanguageDetector() + res := d.DetectWithPrivileges([]languagemodels.Process{DummyProcess{}}) + require.Len(t, res, 1) + require.NotNil(t, res[0]) + assert.Equal(t, test.language, res[0].Name) + }) + } +} diff --git a/pkg/logs/auditor/auditor.go b/pkg/logs/auditor/auditor.go index 578107bfa0e22..cad651a7c7d27 100644 --- a/pkg/logs/auditor/auditor.go +++ b/pkg/logs/auditor/auditor.go @@ -133,8 +133,7 @@ func (a *RegistryAuditor) Channel() chan *message.Payload { // GetOffset returns the last committed offset for a given identifier, // returns an empty string if it does not exist. 
func (a *RegistryAuditor) GetOffset(identifier string) string { - r := a.readOnlyRegistryCopy() - entry, exists := r[identifier] + entry, exists := a.readOnlyRegistryEntryCopy(identifier) if !exists { return "" } @@ -144,8 +143,7 @@ func (a *RegistryAuditor) GetOffset(identifier string) string { // GetTailingMode returns the last committed offset for a given identifier, // returns an empty string if it does not exist. func (a *RegistryAuditor) GetTailingMode(identifier string) string { - r := a.readOnlyRegistryCopy() - entry, exists := r[identifier] + entry, exists := a.readOnlyRegistryEntryCopy(identifier) if !exists { return "" } @@ -265,6 +263,16 @@ func (a *RegistryAuditor) readOnlyRegistryCopy() map[string]RegistryEntry { return r } +func (a *RegistryAuditor) readOnlyRegistryEntryCopy(identifier string) (RegistryEntry, bool) { + a.registryMutex.Lock() + defer a.registryMutex.Unlock() + entry, exists := a.registry[identifier] + if !exists { + return RegistryEntry{}, false + } + return *entry, true +} + // flushRegistry writes on disk the registry at the given path func (a *RegistryAuditor) flushRegistry() error { r := a.readOnlyRegistryCopy() diff --git a/pkg/logs/auditor/go.mod b/pkg/logs/auditor/go.mod index 52a3aba073a47..e36fc3f9239cb 100644 --- a/pkg/logs/auditor/go.mod +++ b/pkg/logs/auditor/go.mod @@ -15,7 +15,10 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils github.com/DataDog/datadog-agent/pkg/logs/message => ../message github.com/DataDog/datadog-agent/pkg/logs/sources => ../sources @@ -43,28 +46,31 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 github.com/stretchr/testify v1.9.0 ) require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig 
v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect @@ -84,7 +90,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -92,12 +98,12 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/logs/auditor/go.sum b/pkg/logs/auditor/go.sum index 77ba213060c82..765bdc23a7bf4 100644 --- a/pkg/logs/auditor/go.sum +++ b/pkg/logs/auditor/go.sum @@ -180,8 +180,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 
h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -251,15 +252,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -295,8 +296,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -312,8 +313,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn 
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/pkg/logs/client/go.mod b/pkg/logs/client/go.mod index a57d90a5a1b37..4e43cd26019fb 100644 --- a/pkg/logs/client/go.mod +++ b/pkg/logs/client/go.mod @@ -17,7 +17,10 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils github.com/DataDog/datadog-agent/pkg/logs/message => ../message github.com/DataDog/datadog-agent/pkg/logs/metrics => ../metrics @@ -46,7 +49,7 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 @@ -55,32 +58,35 @@ require ( github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 github.com/stretchr/testify v1.9.0 - golang.org/x/net v0.28.0 + golang.org/x/net v0.29.0 ) require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + 
github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -110,7 +116,7 @@ require ( github.com/prometheus/procfs v0.11.1 // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect @@ -129,12 +135,12 @@ require ( go.uber.org/fx v1.22.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/tools v0.25.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/logs/client/go.sum b/pkg/logs/client/go.sum index 08e2bfaac8ea7..d0bd498bbfbaa 100644 --- a/pkg/logs/client/go.sum +++ b/pkg/logs/client/go.sum @@ -1,44 +1,6 @@ 
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= @@ -59,15 +21,9 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -82,11 +38,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= @@ -94,9 +46,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml 
v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -114,68 +63,26 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 
h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod 
h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -183,21 +90,15 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9G github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= @@ -206,7 +107,6 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -237,8 +137,6 @@ github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= 
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= @@ -287,8 +185,8 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -307,8 +205,6 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= @@ -326,19 +222,9 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= 
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= go.opentelemetry.io/otel/exporters/prometheus v0.42.0 h1:jwV9iQdvp38fxXi8ZC+lNpxjK16MRcZlpDYvbuO1FiA= @@ -373,106 +259,36 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod 
h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod 
h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 
v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -480,212 +296,47 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= 
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= 
-golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= @@ -711,12 +362,5 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/pkg/logs/diagnostic/go.mod b/pkg/logs/diagnostic/go.mod index 6c6616dd4d46d..30d8e41199554 100644 --- a/pkg/logs/diagnostic/go.mod +++ b/pkg/logs/diagnostic/go.mod @@ -16,7 +16,10 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils github.com/DataDog/datadog-agent/pkg/logs/message => ../message github.com/DataDog/datadog-agent/pkg/logs/sources => ../sources @@ -49,26 +52,29 @@ require ( ) require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // 
indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect @@ -89,7 +95,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect @@ -102,12 +108,12 @@ require ( go.uber.org/fx v1.22.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/logs/diagnostic/go.sum b/pkg/logs/diagnostic/go.sum index c0f06ba5f32fa..b8178bc54e8ad 100644 --- a/pkg/logs/diagnostic/go.sum +++ b/pkg/logs/diagnostic/go.sum @@ -182,8 +182,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= 
github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -255,15 +256,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -299,8 +300,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -316,8 +317,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod 
h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/aggregator.go b/pkg/logs/internal/decoder/auto_multiline_detection/aggregator.go index ead3da8f38604..b1aece4d0536f 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/aggregator.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/aggregator.go @@ -11,8 +11,8 @@ import ( "time" "github.com/DataDog/datadog-agent/pkg/logs/message" + "github.com/DataDog/datadog-agent/pkg/logs/metrics" status "github.com/DataDog/datadog-agent/pkg/logs/status/utils" - "github.com/DataDog/datadog-agent/pkg/telemetry" ) type bucket struct { @@ -61,27 +61,25 @@ func (b *bucket) flush() *message.Message { copy(content, data) msg := message.NewRawMessage(content, b.message.Status, b.originalDataLen, b.message.ParsingExtra.Timestamp) - tlmTags := []string{} + tlmTags := []string{"false", "single_line"} if b.lineCount > 1 { msg.ParsingExtra.IsMultiLine = true - tlmTags = append(tlmTags, "line_type:multi_line") + tlmTags[1] = "multi_line" if b.tagMultiLineLogs { - msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.AutoMultiLineTag) + msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.MultiLineSourceTag("auto_multiline")) } - } else { - tlmTags = append(tlmTags, "line_type:single_line") } if b.truncated { msg.ParsingExtra.IsTruncated = true - tlmTags = append(tlmTags, "truncated:true") + tlmTags[0] = "true" if b.tagTruncatedLogs { - msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.TruncatedTag) + msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.TruncatedReasonTag("auto_multiline")) } } - telemetry.GetStatsTelemetryProvider().Count("datadog.logs_agent.auto_multi_line_aggregator.flush", 1, tlmTags) + metrics.TlmAutoMultilineAggregatorFlush.Inc(tlmTags...) return msg } @@ -135,7 +133,6 @@ func (a *Aggregator) Aggregate(msg *message.Message, label Label) { // If `startGroup` - flush the bucket. 
if label == startGroup { a.multiLineMatchInfo.Add(1) - telemetry.GetStatsTelemetryProvider().Count("datadog.logs_agent.auto_multi_line_aggregator.multiline_matches", 1, []string{""}) a.Flush() } @@ -149,7 +146,6 @@ func (a *Aggregator) Aggregate(msg *message.Message, label Label) { if !a.bucket.isEmpty() { a.linesCombinedInfo.Add(1) - telemetry.GetStatsTelemetryProvider().Count("datadog.logs_agent.auto_multi_line_aggregator.lines_combined", 1, []string{""}) } a.bucket.add(msg) diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/aggregator_test.go b/pkg/logs/internal/decoder/auto_multiline_detection/aggregator_test.go index be067716ccb37..00150b7e41ab0 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/aggregator_test.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/aggregator_test.go @@ -133,12 +133,12 @@ func TestTagTruncatedLogs(t *testing.T) { msg := <-outputChan assert.True(t, msg.ParsingExtra.IsTruncated) - assert.Equal(t, msg.ParsingExtra.Tags, []string{message.TruncatedTag}) + assert.Equal(t, msg.ParsingExtra.Tags, []string{message.TruncatedReasonTag("auto_multiline")}) assertMessageContent(t, msg, "1234567890...TRUNCATED...") msg = <-outputChan assert.True(t, msg.ParsingExtra.IsTruncated) - assert.Equal(t, msg.ParsingExtra.Tags, []string{message.TruncatedTag}) + assert.Equal(t, msg.ParsingExtra.Tags, []string{message.TruncatedReasonTag("auto_multiline")}) assertMessageContent(t, msg, "...TRUNCATED...1") msg = <-outputChan @@ -159,7 +159,7 @@ func TestTagMultiLineLogs(t *testing.T) { msg := <-outputChan assert.True(t, msg.ParsingExtra.IsMultiLine) assert.True(t, msg.ParsingExtra.IsTruncated) - assert.Equal(t, msg.ParsingExtra.Tags, []string{message.AutoMultiLineTag}) + assert.Equal(t, msg.ParsingExtra.Tags, []string{message.MultiLineSourceTag("auto_multiline")}) assertMessageContent(t, msg, "12345\\n67890...TRUNCATED...") msg = <-outputChan diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector_test.go b/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector_test.go index db8e9832c1961..6ea356296bf61 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector_test.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) type testInput struct { @@ -82,8 +82,8 @@ var inputs = []testInput{ } func TestCorrectLabelIsAssigned(t *testing.T) { - tokenizer := NewTokenizer(config.Datadog().GetInt("logs_config.auto_multi_line.tokenizer_max_input_bytes")) - timestampDetector := NewTimestampDetector(config.Datadog().GetFloat64("logs_config.auto_multi_line.timestamp_detector_match_threshold")) + tokenizer := NewTokenizer(pkgconfigsetup.Datadog().GetInt("logs_config.auto_multi_line.tokenizer_max_input_bytes")) + timestampDetector := NewTimestampDetector(pkgconfigsetup.Datadog().GetFloat64("logs_config.auto_multi_line.timestamp_detector_match_threshold")) for _, testInput := range inputs { context := &messageContext{ @@ -102,7 +102,7 @@ func TestCorrectLabelIsAssigned(t *testing.T) { } func printMatchUnderline(context *messageContext, input string, match MatchContext) { - maxLen := config.Datadog().GetInt("logs_config.auto_multi_line.tokenizer_max_input_bytes") + maxLen := pkgconfigsetup.Datadog().GetInt("logs_config.auto_multi_line.tokenizer_max_input_bytes") fmt.Printf("%.2f\t\t%v\n", 
match.probability, input) if match.start == match.end { diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/user_samples.go b/pkg/logs/internal/decoder/auto_multiline_detection/user_samples.go index d60a6031ea3db..0639c6641cc3e 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/user_samples.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/user_samples.go @@ -7,7 +7,9 @@ package automultilinedetection import ( - "github.com/DataDog/datadog-agent/pkg/config" + "regexp" + + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/logs/internal/decoder/auto_multiline_detection/tokens" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -22,6 +24,8 @@ type UserSample struct { // From a user perspective, this is how similar the log has to be to the sample to be considered a match. // Optional - Default value is 0.75. MatchThreshold *float64 `mapstructure:"match_threshold,omitempty"` + // Regex is a pattern used to aggregate logs. NOTE that you can use either a sample or a regex, but not both. + Regex string `mapstructure:"regex,omitempty"` // Label is the label to apply to the log message if it matches the sample. // Optional - Default value is "start_group". Label *string `mapstructure:"label,omitempty"` @@ -30,6 +34,7 @@ type UserSample struct { tokens []tokens.Token matchThreshold float64 label Label + compiledRegex *regexp.Regexp } // UserSamples is a heuristic that represents a collection of user-defined samples for auto multi-line aggregation. @@ -38,7 +43,7 @@ type UserSamples struct { } // NewUserSamples creates a new UserSamples instance. -func NewUserSamples(config config.Reader) *UserSamples { +func NewUserSamples(config model.Reader) *UserSamples { tokenizer := NewTokenizer(0) s := make([]*UserSample, 0) err := config.UnmarshalKey("logs_config.auto_multi_line_detection_custom_samples", &s) @@ -50,21 +55,40 @@ func NewUserSamples(config config.Reader) *UserSamples { } } + legacyAdditionalPatterns := config.GetStringSlice("logs_config.auto_multi_line_extra_patterns") + if len(legacyAdditionalPatterns) > 0 { + log.Warn("Found deprecated logs_config.auto_multi_line_extra_patterns, converting to logs_config.auto_multi_line_detection_custom_samples") + for _, pattern := range legacyAdditionalPatterns { + s = append(s, &UserSample{ + Regex: pattern, + }) + } + } + parsedSamples := make([]*UserSample, 0, len(s)) for _, sample := range s { - if sample.Sample == "" { - log.Warn("Sample was empty, skipping sample") - continue - } - sample.tokens, _ = tokenizer.tokenize([]byte(sample.Sample)) - if sample.MatchThreshold != nil { - if *sample.MatchThreshold <= 0 || *sample.MatchThreshold > 1 { - log.Warnf("Invalid match threshold %f, skipping sample", *sample.MatchThreshold) + if sample.Sample != "" { + sample.tokens, _ = tokenizer.tokenize([]byte(sample.Sample)) + + if sample.MatchThreshold != nil { + if *sample.MatchThreshold <= 0 || *sample.MatchThreshold > 1 { + log.Warnf("Invalid match threshold %f, skipping sample", *sample.MatchThreshold) + continue + } + sample.matchThreshold = *sample.MatchThreshold + } else { + sample.matchThreshold = defaultMatchThreshold + } + } else if sample.Regex != "" { + compiled, err := regexp.Compile("^" + sample.Regex) + if err != nil { + log.Warn(sample.Regex, " is not a valid regular expression - skipping") continue } - sample.matchThreshold = *sample.MatchThreshold + sample.compiledRegex = compiled } else { - sample.matchThreshold = defaultMatchThreshold + log.Warn("Sample and regex were 
empty, skipping") + continue } if sample.Label != nil { @@ -100,7 +124,13 @@ func (j *UserSamples) ProcessAndContinue(context *messageContext) bool { } for _, sample := range j.samples { - if isMatch(sample.tokens, context.tokens, sample.matchThreshold) { + if sample.compiledRegex != nil { + if sample.compiledRegex.Match(context.rawMessage) { + context.label = sample.label + context.labelAssignedBy = "user_sample" + return false + } + } else if isMatch(sample.tokens, context.tokens, sample.matchThreshold) { context.label = sample.label context.labelAssignedBy = "user_sample" return false diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/user_samples_test.go b/pkg/logs/internal/decoder/auto_multiline_detection/user_samples_test.go index 33f15698d0e7a..db74b9efe4d55 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/user_samples_test.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/user_samples_test.go @@ -180,3 +180,80 @@ logs_config: assert.Equal(t, test.expectedLabel, context.label, "Expected label %v, got %v", test.expectedLabel, context.label) } } + +func TestUserPatternsRegexProcess(t *testing.T) { + + datadogYaml := ` +logs_config: + auto_multi_line_extra_patterns: + - "le\\wacy" + auto_multi_line_detection_custom_samples: + - regex: "(foo|bar)test\\d+" +` + + mockConfig := mock.NewFromYAML(t, datadogYaml) + samples := NewUserSamples(mockConfig) + tokenizer := NewTokenizer(60) + + tests := []struct { + expectedLabel Label + shouldStop bool + input string + }{ + {aggregate, true, ""}, + {aggregate, true, "some random log line"}, + {aggregate, true, "2023-03-28T14:33:53.743350Z App started successfully"}, + {startGroup, false, "footest123 some other log line"}, + {startGroup, false, "bartest123 some other log line"}, + {startGroup, false, "legacy pattern should match me"}, + {aggregate, true, "!!![$Not_close_enough%] some other log line"}, + } + + for _, test := range tests { + context := &messageContext{ + rawMessage: []byte(test.input), + label: aggregate, + } + + assert.True(t, tokenizer.ProcessAndContinue(context)) + assert.Equal(t, test.shouldStop, samples.ProcessAndContinue(context), "Expected stop %v, got %v", test.shouldStop, samples.ProcessAndContinue(context)) + assert.Equal(t, test.expectedLabel, context.label, "Expected label %v, got %v", test.expectedLabel, context.label) + } +} + +func TestUserPatternsProcessRegexCustomSettings(t *testing.T) { + + datadogYaml := ` +logs_config: + auto_multi_line_detection_custom_samples: + - regex: "(foo|bar)test\\d+" + label: no_aggregate +` + + mockConfig := mock.NewFromYAML(t, datadogYaml) + samples := NewUserSamples(mockConfig) + tokenizer := NewTokenizer(60) + + tests := []struct { + expectedLabel Label + shouldStop bool + input string + }{ + {aggregate, true, ""}, + {aggregate, true, "some random log line"}, + {aggregate, true, "2023-03-28T14:33:53.743350Z App started successfully"}, + {noAggregate, false, "footest123 some other log line"}, + {noAggregate, false, "bartest123 some other log line"}, + } + + for _, test := range tests { + context := &messageContext{ + rawMessage: []byte(test.input), + label: aggregate, + } + + assert.True(t, tokenizer.ProcessAndContinue(context)) + assert.Equal(t, test.shouldStop, samples.ProcessAndContinue(context), "Expected stop %v, got %v", test.shouldStop, samples.ProcessAndContinue(context)) + assert.Equal(t, test.expectedLabel, context.label, "Expected label %v, got %v", test.expectedLabel, context.label) + } +} diff --git 
a/pkg/logs/internal/decoder/auto_multiline_handler.go b/pkg/logs/internal/decoder/auto_multiline_handler.go index bf69b41693415..e8253cd764d69 100644 --- a/pkg/logs/internal/decoder/auto_multiline_handler.go +++ b/pkg/logs/internal/decoder/auto_multiline_handler.go @@ -8,7 +8,7 @@ package decoder import ( "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" automultilinedetection "github.com/DataDog/datadog-agent/pkg/logs/internal/decoder/auto_multiline_detection" "github.com/DataDog/datadog-agent/pkg/logs/message" status "github.com/DataDog/datadog-agent/pkg/logs/status/utils" @@ -26,22 +26,22 @@ func NewAutoMultilineHandler(outputFn func(m *message.Message), maxContentSize i // Order is important heuristics := []automultilinedetection.Heuristic{} - heuristics = append(heuristics, automultilinedetection.NewTokenizer(config.Datadog().GetInt("logs_config.auto_multi_line.tokenizer_max_input_bytes"))) + heuristics = append(heuristics, automultilinedetection.NewTokenizer(pkgconfigsetup.Datadog().GetInt("logs_config.auto_multi_line.tokenizer_max_input_bytes"))) - if config.Datadog().GetBool("logs_config.auto_multi_line.enable_json_detection") { + if pkgconfigsetup.Datadog().GetBool("logs_config.auto_multi_line.enable_json_detection") { heuristics = append(heuristics, automultilinedetection.NewJSONDetector()) } - heuristics = append(heuristics, automultilinedetection.NewUserSamples(config.Datadog())) + heuristics = append(heuristics, automultilinedetection.NewUserSamples(pkgconfigsetup.Datadog())) - if config.Datadog().GetBool("logs_config.auto_multi_line.enable_datetime_detection") { + if pkgconfigsetup.Datadog().GetBool("logs_config.auto_multi_line.enable_datetime_detection") { heuristics = append(heuristics, automultilinedetection.NewTimestampDetector( - config.Datadog().GetFloat64("logs_config.auto_multi_line.timestamp_detector_match_threshold"))) + pkgconfigsetup.Datadog().GetFloat64("logs_config.auto_multi_line.timestamp_detector_match_threshold"))) } analyticsHeuristics := []automultilinedetection.Heuristic{automultilinedetection.NewPatternTable( - config.Datadog().GetInt("logs_config.auto_multi_line.pattern_table_max_size"), - config.Datadog().GetFloat64("logs_config.auto_multi_line.pattern_table_match_threshold"), + pkgconfigsetup.Datadog().GetInt("logs_config.auto_multi_line.pattern_table_max_size"), + pkgconfigsetup.Datadog().GetFloat64("logs_config.auto_multi_line.pattern_table_match_threshold"), tailerInfo), } @@ -51,8 +51,8 @@ func NewAutoMultilineHandler(outputFn func(m *message.Message), maxContentSize i outputFn, maxContentSize, flushTimeout, - config.Datadog().GetBool("logs_config.tag_truncated_logs"), - config.Datadog().GetBool("logs_config.tag_auto_multi_line_logs"), + pkgconfigsetup.Datadog().GetBool("logs_config.tag_truncated_logs"), + pkgconfigsetup.Datadog().GetBool("logs_config.tag_multi_line_logs"), tailerInfo), } } diff --git a/pkg/logs/internal/decoder/decoder.go b/pkg/logs/internal/decoder/decoder.go index 468cab5f81bee..b3d354b64411f 100644 --- a/pkg/logs/internal/decoder/decoder.go +++ b/pkg/logs/internal/decoder/decoder.go @@ -11,7 +11,7 @@ import ( "github.com/DataDog/datadog-agent/comp/logs/agent/config" //nolint:revive // TODO(AML) Fix revive linter - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/framer" 
"github.com/DataDog/datadog-agent/pkg/logs/internal/parsers" "github.com/DataDog/datadog-agent/pkg/logs/message" @@ -83,7 +83,7 @@ func syncSourceInfo(source *sources.ReplaceableSource, lh *MultiLineHandler) { func NewDecoderWithFraming(source *sources.ReplaceableSource, parser parsers.Parser, framing framer.Framing, multiLinePattern *regexp.Regexp, tailerInfo *status.InfoRegistry) *Decoder { inputChan := make(chan *message.Message) outputChan := make(chan *message.Message) - maxContentSize := config.MaxMessageSizeBytes(pkgConfig.Datadog()) + maxContentSize := config.MaxMessageSizeBytes(pkgconfigsetup.Datadog()) detectedPattern := &DetectedPattern{} outputFn := func(m *message.Message) { outputChan <- m } @@ -92,17 +92,17 @@ func NewDecoderWithFraming(source *sources.ReplaceableSource, parser parsers.Par var lineHandler LineHandler for _, rule := range source.Config().ProcessingRules { if rule.Type == config.MultiLine { - lh := NewMultiLineHandler(outputFn, rule.Regex, config.AggregationTimeout(pkgConfig.Datadog()), maxContentSize, false, tailerInfo) + lh := NewMultiLineHandler(outputFn, rule.Regex, config.AggregationTimeout(pkgconfigsetup.Datadog()), maxContentSize, false, tailerInfo, "multi_line") syncSourceInfo(source, lh) lineHandler = lh } } if lineHandler == nil { - if source.Config().ExperimentalAutoMultiLineEnabled(pkgConfig.Datadog()) { + if source.Config().ExperimentalAutoMultiLineEnabled(pkgconfigsetup.Datadog()) { log.Infof("Experimental Auto multi line log detection enabled") - lineHandler = NewAutoMultilineHandler(outputFn, maxContentSize, config.AggregationTimeout(pkgConfig.Datadog()), tailerInfo) + lineHandler = NewAutoMultilineHandler(outputFn, maxContentSize, config.AggregationTimeout(pkgconfigsetup.Datadog()), tailerInfo) - } else if source.Config().AutoMultiLineEnabled(pkgConfig.Datadog()) { + } else if source.Config().AutoMultiLineEnabled(pkgconfigsetup.Datadog()) { log.Infof("Auto multi line log detection enabled") if multiLinePattern != nil { @@ -111,7 +111,7 @@ func NewDecoderWithFraming(source *sources.ReplaceableSource, parser parsers.Par // Save the pattern again for the next rotation detectedPattern.Set(multiLinePattern) - lh := NewMultiLineHandler(outputFn, multiLinePattern, config.AggregationTimeout(pkgConfig.Datadog()), maxContentSize, true, tailerInfo) + lh := NewMultiLineHandler(outputFn, multiLinePattern, config.AggregationTimeout(pkgconfigsetup.Datadog()), maxContentSize, true, tailerInfo, "legacy_auto_multi_line") syncSourceInfo(source, lh) lineHandler = lh } else { @@ -125,7 +125,7 @@ func NewDecoderWithFraming(source *sources.ReplaceableSource, parser parsers.Par // construct the lineParser, wrapping the parser var lineParser LineParser if parser.SupportsPartialLine() { - lineParser = NewMultiLineParser(lineHandler, config.AggregationTimeout(pkgConfig.Datadog()), parser, maxContentSize) + lineParser = NewMultiLineParser(lineHandler, config.AggregationTimeout(pkgconfigsetup.Datadog()), parser, maxContentSize) } else { lineParser = NewSingleLineParser(lineHandler, parser) } @@ -139,13 +139,13 @@ func NewDecoderWithFraming(source *sources.ReplaceableSource, parser parsers.Par func buildLegacyAutoMultilineHandlerFromConfig(outputFn func(*message.Message), maxContentSize int, source *sources.ReplaceableSource, detectedPattern *DetectedPattern, tailerInfo *status.InfoRegistry) *LegacyAutoMultilineHandler { linesToSample := source.Config().AutoMultiLineSampleSize if linesToSample <= 0 { - linesToSample = 
pkgConfig.Datadog().GetInt("logs_config.auto_multi_line_default_sample_size") + linesToSample = pkgconfigsetup.Datadog().GetInt("logs_config.auto_multi_line_default_sample_size") } matchThreshold := source.Config().AutoMultiLineMatchThreshold if matchThreshold == 0 { - matchThreshold = pkgConfig.Datadog().GetFloat64("logs_config.auto_multi_line_default_match_threshold") + matchThreshold = pkgconfigsetup.Datadog().GetFloat64("logs_config.auto_multi_line_default_match_threshold") } - additionalPatterns := pkgConfig.Datadog().GetStringSlice("logs_config.auto_multi_line_extra_patterns") + additionalPatterns := pkgconfigsetup.Datadog().GetStringSlice("logs_config.auto_multi_line_extra_patterns") additionalPatternsCompiled := []*regexp.Regexp{} for _, p := range additionalPatterns { @@ -157,14 +157,14 @@ func buildLegacyAutoMultilineHandlerFromConfig(outputFn func(*message.Message), additionalPatternsCompiled = append(additionalPatternsCompiled, compiled) } - matchTimeout := time.Second * pkgConfig.Datadog().GetDuration("logs_config.auto_multi_line_default_match_timeout") + matchTimeout := time.Second * pkgconfigsetup.Datadog().GetDuration("logs_config.auto_multi_line_default_match_timeout") return NewLegacyAutoMultilineHandler( outputFn, maxContentSize, linesToSample, matchThreshold, matchTimeout, - config.AggregationTimeout(pkgConfig.Datadog()), + config.AggregationTimeout(pkgconfigsetup.Datadog()), source, additionalPatternsCompiled, detectedPattern, diff --git a/pkg/logs/internal/decoder/file_decoder.go b/pkg/logs/internal/decoder/file_decoder.go index 48b8d901826e0..2cb7303f3242a 100644 --- a/pkg/logs/internal/decoder/file_decoder.go +++ b/pkg/logs/internal/decoder/file_decoder.go @@ -9,7 +9,7 @@ import ( "regexp" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/framer" "github.com/DataDog/datadog-agent/pkg/logs/internal/parsers" "github.com/DataDog/datadog-agent/pkg/logs/internal/parsers/dockerfile" @@ -36,7 +36,7 @@ func NewDecoderFromSourceWithPattern(source *sources.ReplaceableSource, multiLin case sources.KubernetesSourceType: lineParser = kubernetes.New() case sources.DockerSourceType: - if coreConfig.Datadog().GetBool("logs_config.use_podman_logs") { + if pkgconfigsetup.Datadog().GetBool("logs_config.use_podman_logs") { // podman's on-disk logs are in kubernetes format lineParser = kubernetes.New() } else { diff --git a/pkg/logs/internal/decoder/legacy_auto_multiline_handler.go b/pkg/logs/internal/decoder/legacy_auto_multiline_handler.go index 8e7b0a1321764..84c4687f00832 100644 --- a/pkg/logs/internal/decoder/legacy_auto_multiline_handler.go +++ b/pkg/logs/internal/decoder/legacy_auto_multiline_handler.go @@ -207,7 +207,7 @@ func (h *LegacyAutoMultilineHandler) switchToMultilineHandler(r *regexp.Regexp) h.singleLineHandler = nil // Build and start a multiline-handler - h.multiLineHandler = NewMultiLineHandler(h.outputFn, r, h.flushTimeout, h.lineLimit, true, h.tailerInfo) + h.multiLineHandler = NewMultiLineHandler(h.outputFn, r, h.flushTimeout, h.lineLimit, true, h.tailerInfo, "legacy_auto_multi_line") h.source.RegisterInfo(h.multiLineHandler.countInfo) h.source.RegisterInfo(h.multiLineHandler.linesCombinedInfo) // stay with the multiline handler diff --git a/pkg/logs/internal/decoder/line_handler_benchmark_test.go b/pkg/logs/internal/decoder/line_handler_benchmark_test.go index 
85d3266c920f4..26947159172c2 100644 --- a/pkg/logs/internal/decoder/line_handler_benchmark_test.go +++ b/pkg/logs/internal/decoder/line_handler_benchmark_test.go @@ -12,7 +12,7 @@ import ( "time" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/logs/sources" status "github.com/DataDog/datadog-agent/pkg/logs/status/utils" @@ -24,7 +24,7 @@ func benchmarkSingleLineHandler(b *testing.B, logs int) { messages[i] = getDummyMessageWithLF(fmt.Sprintf("This is a log test line to benchmark the logs agent %d", i)) } - h := NewSingleLineHandler(func(*message.Message) {}, coreConfig.DefaultMaxMessageSizeBytes) + h := NewSingleLineHandler(func(*message.Message) {}, pkgconfigsetup.DefaultMaxMessageSizeBytes) b.ResetTimer() for n := 0; n < b.N; n++ { @@ -41,7 +41,7 @@ func benchmarkAutoMultiLineHandler(b *testing.B, logs int, line string) { } source := sources.NewReplaceableSource(sources.NewLogSource("config", &config.LogsConfig{})) - h := NewLegacyAutoMultilineHandler(func(*message.Message) {}, coreConfig.DefaultMaxMessageSizeBytes, 1000, 0.9, 30*time.Second, 1000*time.Millisecond, source, []*regexp.Regexp{}, &DetectedPattern{}, status.NewInfoRegistry()) + h := NewLegacyAutoMultilineHandler(func(*message.Message) {}, pkgconfigsetup.DefaultMaxMessageSizeBytes, 1000, 0.9, 30*time.Second, 1000*time.Millisecond, source, []*regexp.Regexp{}, &DetectedPattern{}, status.NewInfoRegistry()) b.ResetTimer() for n := 0; n < b.N; n++ { @@ -57,7 +57,7 @@ func benchmarkMultiLineHandler(b *testing.B, logs int, line string) { messages[i] = getDummyMessageWithLF(fmt.Sprintf("%s %d", line, i)) } - h := NewMultiLineHandler(func(*message.Message) {}, regexp.MustCompile(`^[A-Za-z_]+ \d+, \d+ \d+:\d+:\d+ (AM|PM)`), 1000*time.Millisecond, 100, false, status.NewInfoRegistry()) + h := NewMultiLineHandler(func(*message.Message) {}, regexp.MustCompile(`^[A-Za-z_]+ \d+, \d+ \d+:\d+:\d+ (AM|PM)`), 1000*time.Millisecond, 100, false, status.NewInfoRegistry(), "") b.ResetTimer() for n := 0; n < b.N; n++ { diff --git a/pkg/logs/internal/decoder/line_handler_test.go b/pkg/logs/internal/decoder/line_handler_test.go index 24584506dfa9d..3cac2d6d21032 100644 --- a/pkg/logs/internal/decoder/line_handler_test.go +++ b/pkg/logs/internal/decoder/line_handler_test.go @@ -96,7 +96,7 @@ func TestTrimSingleLine(t *testing.T) { func TestMultiLineHandler(t *testing.T) { re := regexp.MustCompile(`[0-9]+\.`) outputFn, outputChan := lineHandlerChans() - h := NewMultiLineHandler(outputFn, re, 250*time.Millisecond, 20, false, status.NewInfoRegistry()) + h := NewMultiLineHandler(outputFn, re, 250*time.Millisecond, 20, false, status.NewInfoRegistry(), "") var output *message.Message @@ -187,7 +187,7 @@ func TestMultiLineHandler(t *testing.T) { func TestTrimMultiLine(t *testing.T) { re := regexp.MustCompile(`[0-9]+\.`) outputFn, outputChan := lineHandlerChans() - h := NewMultiLineHandler(outputFn, re, 250*time.Millisecond, 100, false, status.NewInfoRegistry()) + h := NewMultiLineHandler(outputFn, re, 250*time.Millisecond, 100, false, status.NewInfoRegistry(), "") var output *message.Message @@ -216,7 +216,7 @@ func TestTrimMultiLine(t *testing.T) { func TestMultiLineHandlerDropsEmptyMessages(t *testing.T) { re := regexp.MustCompile(`[0-9]+\.`) outputFn, outputChan := lineHandlerChans() - h := NewMultiLineHandler(outputFn, re, 
250*time.Millisecond, 100, false, status.NewInfoRegistry()) + h := NewMultiLineHandler(outputFn, re, 250*time.Millisecond, 100, false, status.NewInfoRegistry(), "") h.process(getDummyMessage("")) @@ -245,7 +245,7 @@ func TestSingleLineHandlerSendsRawInvalidMessages(t *testing.T) { func TestMultiLineHandlerSendsRawInvalidMessages(t *testing.T) { re := regexp.MustCompile(`[0-9]+\.`) outputFn, outputChan := lineHandlerChans() - h := NewMultiLineHandler(outputFn, re, 250*time.Millisecond, 100, false, status.NewInfoRegistry()) + h := NewMultiLineHandler(outputFn, re, 250*time.Millisecond, 100, false, status.NewInfoRegistry(), "") h.process(getDummyMessage("1.third line")) h.process(getDummyMessage("fourth line")) diff --git a/pkg/logs/internal/decoder/multiline_handler.go b/pkg/logs/internal/decoder/multiline_handler.go index 5fdeb2a80ce3f..67fba384cbb42 100644 --- a/pkg/logs/internal/decoder/multiline_handler.go +++ b/pkg/logs/internal/decoder/multiline_handler.go @@ -10,7 +10,7 @@ import ( "regexp" "time" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/message" status "github.com/DataDog/datadog-agent/pkg/logs/status/utils" "github.com/DataDog/datadog-agent/pkg/telemetry" @@ -38,10 +38,11 @@ type MultiLineHandler struct { linesCombinedInfo *status.CountInfo telemetryEnabled bool linesCombined int + multiLineTagValue string } // NewMultiLineHandler returns a new MultiLineHandler. -func NewMultiLineHandler(outputFn func(*message.Message), newContentRe *regexp.Regexp, flushTimeout time.Duration, lineLimit int, telemetryEnabled bool, tailerInfo *status.InfoRegistry) *MultiLineHandler { +func NewMultiLineHandler(outputFn func(*message.Message), newContentRe *regexp.Regexp, flushTimeout time.Duration, lineLimit int, telemetryEnabled bool, tailerInfo *status.InfoRegistry, multiLineTagValue string) *MultiLineHandler { i := status.NewMappedInfo("Multi-Line Pattern") i.SetMessage("Pattern", newContentRe.String()) @@ -57,6 +58,7 @@ func NewMultiLineHandler(outputFn func(*message.Message), newContentRe *regexp.R linesCombinedInfo: status.NewCountInfo("Lines Combined"), telemetryEnabled: telemetryEnabled, linesCombined: 0, + multiLineTagValue: multiLineTagValue, } return h } @@ -163,8 +165,11 @@ func (h *MultiLineHandler) sendBuffer() { } msg := message.NewRawMessage(content, h.status, h.linesLen, h.timestamp) msg.ParsingExtra.IsTruncated = h.isBufferTruncated - if h.isBufferTruncated && coreConfig.Datadog().GetBool("logs_config.tag_truncated_logs") { - msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.TruncatedTag) + if h.isBufferTruncated && pkgconfigsetup.Datadog().GetBool("logs_config.tag_truncated_logs") { + msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.TruncatedReasonTag("multiline_regex")) + } + if h.isBufferTruncated && pkgconfigsetup.Datadog().GetBool("logs_config.tag_multi_line_logs") { + msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.MultiLineSourceTag(h.multiLineTagValue)) } h.outputFn(msg) } diff --git a/pkg/logs/internal/decoder/single_line_handler.go b/pkg/logs/internal/decoder/single_line_handler.go index 4f2b192561c24..34f5fa8c3ec02 100644 --- a/pkg/logs/internal/decoder/single_line_handler.go +++ b/pkg/logs/internal/decoder/single_line_handler.go @@ -9,7 +9,7 @@ import ( "bytes" "time" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" 
"github.com/DataDog/datadog-agent/pkg/logs/message" ) @@ -38,8 +38,8 @@ func (h *SingleLineHandler) flush() { } func addTruncatedTag(msg *message.Message) { - if coreConfig.Datadog().GetBool("logs_config.tag_truncated_logs") { - msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.TruncatedTag) + if pkgconfigsetup.Datadog().GetBool("logs_config.tag_truncated_logs") { + msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.TruncatedReasonTag("single_line")) } } diff --git a/pkg/logs/internal/tag/local_provider.go b/pkg/logs/internal/tag/local_provider.go index fb0898b48cde6..c0e49730b8e73 100644 --- a/pkg/logs/internal/tag/local_provider.go +++ b/pkg/logs/internal/tag/local_provider.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/comp/logs/agent/config" hostMetadataUtils "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/hosttags" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/benbjohnson/clock" ) @@ -38,12 +38,12 @@ func newLocalProviderWithClock(t []string, clock clock.Clock) Provider { expectedTags: t, } - if config.IsExpectedTagsSet(coreConfig.Datadog()) { - p.expectedTags = append(p.tags, hostMetadataUtils.Get(context.TODO(), false, coreConfig.Datadog()).System...) + if config.IsExpectedTagsSet(pkgconfigsetup.Datadog()) { + p.expectedTags = append(p.tags, hostMetadataUtils.Get(context.TODO(), false, pkgconfigsetup.Datadog()).System...) // expected tags deadline is based on the agent start time, which may have been earlier // than the current time. - expectedTagsDeadline := coreConfig.StartTime.Add(coreConfig.Datadog().GetDuration("logs_config.expected_tags_duration")) + expectedTagsDeadline := pkgconfigsetup.StartTime.Add(pkgconfigsetup.Datadog().GetDuration("logs_config.expected_tags_duration")) // reset submitExpectedTags after deadline elapsed clock.AfterFunc(expectedTagsDeadline.Sub(clock.Now()), func() { diff --git a/pkg/logs/internal/tag/local_provider_test.go b/pkg/logs/internal/tag/local_provider_test.go index 5a8ed5e1f5280..77c0d50a9a3ac 100644 --- a/pkg/logs/internal/tag/local_provider_test.go +++ b/pkg/logs/internal/tag/local_provider_test.go @@ -13,8 +13,8 @@ import ( "github.com/benbjohnson/clock" "github.com/stretchr/testify/assert" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestLocalProviderShouldReturnEmptyList(t *testing.T) { @@ -36,10 +36,10 @@ func TestLocalProviderExpectedTags(t *testing.T) { mockConfig := configmock.New(t) clock := clock.NewMock() - oldStartTime := coreConfig.StartTime - coreConfig.StartTime = clock.Now() + oldStartTime := pkgconfigsetup.StartTime + pkgconfigsetup.StartTime = clock.Now() defer func() { - coreConfig.StartTime = oldStartTime + pkgconfigsetup.StartTime = oldStartTime }() tags := []string{"tag1:value1", "tag2", "tag3"} diff --git a/pkg/logs/internal/tag/provider.go b/pkg/logs/internal/tag/provider.go index b26b34bad479f..7196bacb02646 100644 --- a/pkg/logs/internal/tag/provider.go +++ b/pkg/logs/internal/tag/provider.go @@ -13,7 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -46,7 +46,7 
@@ func NewProvider(entityID string, tagAdder EntityTagAdder) Provider { func newProviderWithClock(entityID string, clock clock.Clock, tagAdder EntityTagAdder) Provider { p := &provider{ entityID: entityID, - taggerWarmupDuration: config.TaggerWarmupDuration(pkgConfig.Datadog()), + taggerWarmupDuration: config.TaggerWarmupDuration(pkgconfigsetup.Datadog()), localTagProvider: newLocalProviderWithClock([]string{}, clock), clock: clock, tagAdder: tagAdder, diff --git a/pkg/logs/internal/tag/provider_benchmark_test.go b/pkg/logs/internal/tag/provider_benchmark_test.go index 2ebc319fd9785..8b2ece36e2d83 100644 --- a/pkg/logs/internal/tag/provider_benchmark_test.go +++ b/pkg/logs/internal/tag/provider_benchmark_test.go @@ -10,16 +10,16 @@ import ( "time" "github.com/DataDog/datadog-agent/comp/core/tagger/types" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" model "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func setupConfig(t testing.TB, tags []string) (model.Config, time.Time) { mockConfig := configmock.New(t) - startTime := config.StartTime - config.StartTime = time.Now() + startTime := pkgconfigsetup.StartTime + pkgconfigsetup.StartTime = time.Now() mockConfig.SetWithoutSource("tags", tags) @@ -37,7 +37,7 @@ func BenchmarkProviderExpectedTags(b *testing.B) { m, start := setupConfig(b, []string{"tag1:value1", "tag2", "tag3"}) defer func() { - config.StartTime = start + pkgconfigsetup.StartTime = start }() defer m.SetWithoutSource("tags", nil) @@ -58,7 +58,7 @@ func BenchmarkProviderExpectedTagsEmptySlice(b *testing.B) { m, start := setupConfig(b, []string{}) defer func() { - config.StartTime = start + pkgconfigsetup.StartTime = start }() if len(m.GetStringSlice("tags")) > 0 { @@ -81,7 +81,7 @@ func BenchmarkProviderExpectedTagsNil(b *testing.B) { m, start := setupConfig(b, nil) defer func() { - config.StartTime = start + pkgconfigsetup.StartTime = start }() if len(m.GetStringSlice("tags")) > 0 { @@ -104,7 +104,7 @@ func BenchmarkProviderNoExpectedTags(b *testing.B) { m, start := setupConfig(b, []string{"tag1:value1", "tag2", "tag3"}) defer func() { - config.StartTime = start + pkgconfigsetup.StartTime = start }() defer m.SetWithoutSource("tags", nil) @@ -124,7 +124,7 @@ func BenchmarkProviderNoExpectedTagsNil(b *testing.B) { m, start := setupConfig(b, nil) defer func() { - config.StartTime = start + pkgconfigsetup.StartTime = start }() defer m.SetWithoutSource("tags", nil) diff --git a/pkg/logs/internal/tag/provider_test.go b/pkg/logs/internal/tag/provider_test.go index 13786ec561b6d..b238d9c58b542 100644 --- a/pkg/logs/internal/tag/provider_test.go +++ b/pkg/logs/internal/tag/provider_test.go @@ -15,8 +15,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestProviderExpectedTags(t *testing.T) { @@ -24,11 +24,11 @@ func TestProviderExpectedTags(t *testing.T) { clock := clock.NewMock() fakeTagger := taggerimpl.SetupFakeTagger(t) defer fakeTagger.ResetTagger() - oldStartTime := coreConfig.StartTime + oldStartTime := pkgconfigsetup.StartTime then := clock.Now() - coreConfig.StartTime = then + pkgconfigsetup.StartTime = then defer func() { - coreConfig.StartTime = oldStartTime + pkgconfigsetup.StartTime = oldStartTime }() tags := 
[]string{"tag1:value1", "tag2", "tag3"} diff --git a/pkg/logs/internal/util/containersorpods/containers_or_pods.go b/pkg/logs/internal/util/containersorpods/containers_or_pods.go index 4ee689bff2768..010c34c68c710 100644 --- a/pkg/logs/internal/util/containersorpods/containers_or_pods.go +++ b/pkg/logs/internal/util/containersorpods/containers_or_pods.go @@ -11,8 +11,8 @@ import ( "sync" "time" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -158,7 +158,7 @@ func (ch *chooser) start() { // preferred returns the preferred LogWhat, based on configuration func (ch *chooser) preferred() LogWhat { - if config.Datadog().GetBool("logs_config.k8s_container_use_file") { + if pkgconfigsetup.Datadog().GetBool("logs_config.k8s_container_use_file") { return LogPods } return LogContainers diff --git a/pkg/logs/internal/util/containersorpods/containers_or_pods_test.go b/pkg/logs/internal/util/containersorpods/containers_or_pods_test.go index 5039a6fc4d2b9..dd30cd6b255ca 100644 --- a/pkg/logs/internal/util/containersorpods/containers_or_pods_test.go +++ b/pkg/logs/internal/util/containersorpods/containers_or_pods_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" ) @@ -29,7 +28,7 @@ func TestChoose(t *testing.T) { expected LogWhat, ) func(*testing.T) { return func(t *testing.T) { - config.SetFeatures(t, features...) + env.SetFeatures(t, features...) mockConfig := configmock.New(t) mockConfig.SetWithoutSource("logs_config.k8s_container_use_file", k8sContainerUseFile) diff --git a/pkg/logs/launchers/container/tailerfactory/file.go b/pkg/logs/launchers/container/tailerfactory/file.go index 9ef2b0f06777c..02289d82ae94b 100644 --- a/pkg/logs/launchers/container/tailerfactory/file.go +++ b/pkg/logs/launchers/container/tailerfactory/file.go @@ -20,7 +20,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/util/containersorpods" "github.com/DataDog/datadog-agent/pkg/logs/launchers/container/tailerfactory/tailers" "github.com/DataDog/datadog-agent/pkg/logs/sources" @@ -33,7 +33,7 @@ import ( var podLogsBasePath = "/var/log/pods" var dockerLogsBasePathNix = "/var/lib/docker" var dockerLogsBasePathWin = "c:\\programdata\\docker" -var podmanLogsBasePath = "/var/lib/containers" +var podmanRootfullLogsBasePath = "/var/lib/containers" // makeFileTailer makes a file-based tailer for the given source, or returns // an error if it cannot do so (e.g., due to permission errors) @@ -147,7 +147,7 @@ func (tf *factory) makeDockerFileSource(source *sources.LogSource) (*sources.Log func (tf *factory) findDockerLogPath(containerID string) string { // if the user has set a custom docker data root, this will pick it up // and set it in place of the usual docker base path - overridePath := coreConfig.Datadog().GetString("logs_config.docker_path_override") + overridePath := pkgconfigsetup.Datadog().GetString("logs_config.docker_path_override") if len(overridePath) > 0 { return filepath.Join(overridePath, "containers", 
containerID, fmt.Sprintf("%s-json.log", containerID)) } @@ -160,7 +160,14 @@ func (tf *factory) findDockerLogPath(containerID string) string { default: // linux, darwin // this config flag provides temporary support for podman while it is // still recognized by AD as a "docker" runtime. - if coreConfig.Datadog().GetBool("logs_config.use_podman_logs") { + if pkgconfigsetup.Datadog().GetBool("logs_config.use_podman_logs") { + // Default path for podman rootfull containers + podmanLogsBasePath := podmanRootfullLogsBasePath + podmanDBPath := pkgconfigsetup.Datadog().GetString("podman_db_path") + // User provided a custom podman DB path, they are running rootless containers or modified the root directory. + if len(podmanDBPath) > 0 { + podmanLogsBasePath = log.ExtractPodmanRootDirFromDBPath(podmanDBPath) + } return filepath.Join( podmanLogsBasePath, "storage/overlay-containers", containerID, "userdata/ctr.log") diff --git a/pkg/logs/launchers/container/tailerfactory/file_test.go b/pkg/logs/launchers/container/tailerfactory/file_test.go index b98f9102c7308..314b804bfe8f0 100644 --- a/pkg/logs/launchers/container/tailerfactory/file_test.go +++ b/pkg/logs/launchers/container/tailerfactory/file_test.go @@ -43,7 +43,7 @@ func fileTestSetup(t *testing.T) { oldPodLogsBasePath, podLogsBasePath = podLogsBasePath, filepath.Join(tmp, "pods") oldDockerLogsBasePathNix, dockerLogsBasePathNix = dockerLogsBasePathNix, filepath.Join(tmp, "docker-nix") oldDockerLogsBasePathWin, dockerLogsBasePathWin = dockerLogsBasePathWin, filepath.Join(tmp, "docker-win") - oldPodmanLogsBasePath, podmanLogsBasePath = podmanLogsBasePath, filepath.Join(tmp, "containers") + oldPodmanLogsBasePath, podmanRootfullLogsBasePath = podmanRootfullLogsBasePath, filepath.Join(tmp, "containers") switch runtime.GOOS { case "windows": @@ -56,7 +56,7 @@ func fileTestSetup(t *testing.T) { podLogsBasePath = oldPodLogsBasePath dockerLogsBasePathNix = oldDockerLogsBasePathNix dockerLogsBasePathWin = oldDockerLogsBasePathWin - podmanLogsBasePath = oldPodmanLogsBasePath + podmanRootfullLogsBasePath = oldPodmanLogsBasePath }) } @@ -145,7 +145,7 @@ func TestMakeFileSource_podman_success(t *testing.T) { t.Skip("Skip on Windows due to WSL file path abstraction") } - p := filepath.Join(podmanLogsBasePath, filepath.FromSlash("storage/overlay-containers/abc/userdata/ctr.log")) + p := filepath.Join(podmanRootfullLogsBasePath, filepath.FromSlash("storage/overlay-containers/abc/userdata/ctr.log")) require.NoError(t, os.MkdirAll(filepath.Dir(p), 0o777)) require.NoError(t, os.WriteFile(p, []byte("{}"), 0o666)) @@ -178,6 +178,45 @@ func TestMakeFileSource_podman_success(t *testing.T) { require.Equal(t, source.Config.AutoMultiLineMatchThreshold, 0.321) } +func TestMakeFileSource_podman_with_db_path_success(t *testing.T) { + tmp := t.TempDir() + customPath := filepath.Join(tmp, "/custom/path/containers/storage/db.sql") + mockConfig := configmock.New(t) + mockConfig.SetWithoutSource("logs_config.use_podman_logs", true) + mockConfig.SetWithoutSource("podman_db_path", customPath) + + // On Windows, podman runs within a Linux virtual machine, so the Agent would believe it runs in a Linux environment with all the paths being nix-like. + // The real path on the system is abstracted by the Windows Subsystem for Linux layer, so this unit test is skipped. 
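// Illustrative sketch only, not the pkg/util/log helper referenced above: the
// launcher is assumed to derive the podman root directory by stripping the
// trailing "storage/db.sql" segment from the configured podman_db_path, which is
// exactly the layout this test sets up. Uses path/filepath; the function name is
// hypothetical.
func extractPodmanRootDirSketch(dbPath string) string {
	// "<root>/storage/db.sql" -> "<root>/storage" -> "<root>"
	return filepath.Dir(filepath.Dir(dbPath))
}

// For example, extractPodmanRootDirSketch("/custom/path/containers/storage/db.sql")
// returns "/custom/path/containers", so the tailer would look for
// "/custom/path/containers/storage/overlay-containers/<id>/userdata/ctr.log".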
+ // Ref: https://github.com/containers/podman/blob/main/docs/tutorials/podman-for-windows.md + if runtime.GOOS == "windows" { + t.Skip("Skip on Windows due to WSL file path abstraction") + } + + p := filepath.Join(filepath.Join(tmp, "/custom/path/containers"), filepath.FromSlash("storage/overlay-containers/abc/userdata/ctr.log")) + require.NoError(t, os.MkdirAll(filepath.Dir(p), 0o777)) + require.NoError(t, os.WriteFile(p, []byte("{}"), 0o666)) + + tf := &factory{ + pipelineProvider: pipeline.NewMockProvider(), + cop: containersorpods.NewDecidedChooser(containersorpods.LogContainers), + } + source := sources.NewLogSource("test", &config.LogsConfig{ + Type: "podman", + Identifier: "abc", + Source: "src", + Service: "svc", + }) + child, err := tf.makeFileSource(source) + require.NoError(t, err) + require.Equal(t, source.Name, child.Name) + require.Equal(t, "file", child.Config.Type) + require.Equal(t, source.Config.Identifier, child.Config.Identifier) + require.Equal(t, p, child.Config.Path) + require.Equal(t, source.Config.Source, child.Config.Source) + require.Equal(t, source.Config.Service, child.Config.Service) + require.Equal(t, sources.DockerSourceType, child.GetSourceType()) +} + func TestMakeFileSource_docker_no_file(t *testing.T) { fileTestSetup(t) diff --git a/pkg/logs/launchers/container/tailerfactory/socket.go b/pkg/logs/launchers/container/tailerfactory/socket.go index 714be2815d44c..5e5fbdda1e7da 100644 --- a/pkg/logs/launchers/container/tailerfactory/socket.go +++ b/pkg/logs/launchers/container/tailerfactory/socket.go @@ -15,7 +15,7 @@ import ( "fmt" "time" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/launchers/container/tailerfactory/tailers" "github.com/DataDog/datadog-agent/pkg/logs/sources" ) @@ -43,7 +43,7 @@ func (tf *factory) makeSocketTailer(source *sources.LogSource) (Tailer, error) { // available at some point, so chances are good that tailing will succeed. pipeline := tf.pipelineProvider.NextPipelineChan() - readTimeout := time.Duration(coreConfig.Datadog().GetInt("logs_config.docker_client_read_timeout")) * time.Second + readTimeout := time.Duration(pkgconfigsetup.Datadog().GetInt("logs_config.docker_client_read_timeout")) * time.Second // apply defaults for source and service directly to the LogSource struct (!!) 
source.Config.Source, source.Config.Service = tf.defaultSourceAndService(source, tf.cop.Get()) diff --git a/pkg/logs/launchers/container/tailerfactory/usefile.go b/pkg/logs/launchers/container/tailerfactory/usefile.go index 0c2665602bae9..dbecd83b1f292 100644 --- a/pkg/logs/launchers/container/tailerfactory/usefile.go +++ b/pkg/logs/launchers/container/tailerfactory/usefile.go @@ -11,7 +11,7 @@ import ( "context" "fmt" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/util/containersorpods" "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -30,12 +30,12 @@ func (tf *factory) useFile(source *sources.LogSource) bool { switch logWhat { case containersorpods.LogContainers: // docker_container_use_file is a suggestion - if !coreConfig.Datadog().GetBool("logs_config.docker_container_use_file") { + if !pkgconfigsetup.Datadog().GetBool("logs_config.docker_container_use_file") { return false } // docker_container_force_use_file is a requirement - if coreConfig.Datadog().GetBool("logs_config.docker_container_force_use_file") { + if pkgconfigsetup.Datadog().GetBool("logs_config.docker_container_force_use_file") { return true } @@ -51,7 +51,7 @@ func (tf *factory) useFile(source *sources.LogSource) bool { return true case containersorpods.LogPods: - return coreConfig.Datadog().GetBool("logs_config.k8s_container_use_file") + return pkgconfigsetup.Datadog().GetBool("logs_config.k8s_container_use_file") default: // if this occurs, then sources have been arriving before the diff --git a/pkg/logs/launchers/file/launcher_test.go b/pkg/logs/launchers/file/launcher_test.go index 96c44e8031568..a0e53f93f7d3b 100644 --- a/pkg/logs/launchers/file/launcher_test.go +++ b/pkg/logs/launchers/file/launcher_test.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/config" flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" auditor "github.com/DataDog/datadog-agent/pkg/logs/auditor/mock" "github.com/DataDog/datadog-agent/pkg/logs/internal/util" "github.com/DataDog/datadog-agent/pkg/logs/launchers" @@ -80,7 +80,7 @@ func (suite *LauncherTestSuite) SetupTest() { suite.s.pipelineProvider = suite.pipelineProvider suite.s.registry = auditor.NewRegistry() suite.s.activeSources = append(suite.s.activeSources, suite.source) - status.InitStatus(pkgConfig.Datadog(), util.CreateSources([]*sources.LogSource{suite.source})) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources([]*sources.LogSource{suite.source})) suite.s.scan() } @@ -244,7 +244,7 @@ func TestLauncherScanStartNewTailer(t *testing.T) { source := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Identifier: configID, Path: path}) launcher.activeSources = append(launcher.activeSources, source) status.Clear() - status.InitStatus(pkgConfig.Datadog(), util.CreateSources([]*sources.LogSource{source})) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources([]*sources.LogSource{source})) defer status.Clear() // create file @@ -454,7 +454,7 @@ func TestLauncherScanWithTooManyFiles(t *testing.T) { source := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: path}) launcher.activeSources = 
append(launcher.activeSources, source) status.Clear() - status.InitStatus(pkgConfig.Datadog(), util.CreateSources([]*sources.LogSource{source})) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources([]*sources.LogSource{source})) defer status.Clear() // test at scan @@ -544,7 +544,7 @@ func TestLauncherScanRecentFilesWithRemoval(t *testing.T) { source := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: logDirectory}) launcher.activeSources = append(launcher.activeSources, source) status.Clear() - status.InitStatus(pkgConfig.Datadog(), util.CreateSources([]*sources.LogSource{source})) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources([]*sources.LogSource{source})) return launcher } @@ -602,7 +602,7 @@ func TestLauncherScanRecentFilesWithNewFiles(t *testing.T) { source := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: logDirectory}) launcher.activeSources = append(launcher.activeSources, source) status.Clear() - status.InitStatus(pkgConfig.Datadog(), util.CreateSources([]*sources.LogSource{source})) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources([]*sources.LogSource{source})) return launcher } @@ -665,7 +665,7 @@ func TestLauncherFileRotation(t *testing.T) { source := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: logDirectory}) launcher.activeSources = append(launcher.activeSources, source) status.Clear() - status.InitStatus(pkgConfig.Datadog(), util.CreateSources([]*sources.LogSource{source})) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources([]*sources.LogSource{source})) return launcher } @@ -732,7 +732,7 @@ func TestLauncherFileDetectionSingleScan(t *testing.T) { source := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: logDirectory}) launcher.activeSources = append(launcher.activeSources, source) status.Clear() - status.InitStatus(pkgConfig.Datadog(), util.CreateSources([]*sources.LogSource{source})) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources([]*sources.LogSource{source})) return launcher } diff --git a/pkg/logs/launchers/file/provider/file_provider_test.go b/pkg/logs/launchers/file/provider/file_provider_test.go index efa0ad125901d..caa20a4b5f074 100644 --- a/pkg/logs/launchers/file/provider/file_provider_test.go +++ b/pkg/logs/launchers/file/provider/file_provider_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/util" "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/logs/status" @@ -135,7 +135,7 @@ func (suite *ProviderTestSuite) TestFilesToTailReturnsAllFilesFromDirectory() { path := fmt.Sprintf("%s/1/*.log", suite.testDir) fileProvider := NewFileProvider(suite.filesLimit, WildcardUseFileName) logSources := suite.newLogSources(path) - status.InitStatus(pkgConfig.Datadog(), util.CreateSources(logSources)) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources(logSources)) files := fileProvider.FilesToTail(true, logSources) suite.Equal(3, len(files)) @@ -197,7 +197,7 @@ func (suite *ProviderTestSuite) TestFilesToTailReturnsSpecificFileWithWildcard() path := fmt.Sprintf("%s/1/?.log", suite.testDir) fileProvider := NewFileProvider(suite.filesLimit, WildcardUseFileName) logSources := 
suite.newLogSources(path) - status.InitStatus(pkgConfig.Datadog(), util.CreateSources(logSources)) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources(logSources)) files := fileProvider.FilesToTail(true, logSources) suite.Equal(3, len(files)) @@ -237,7 +237,7 @@ func (suite *ProviderTestSuite) TestNumberOfFilesToTailDoesNotExceedLimit() { path := fmt.Sprintf("%s/*/*.log", suite.testDir) fileProvider := NewFileProvider(suite.filesLimit, WildcardUseFileName) logSources := suite.newLogSources(path) - status.InitStatus(pkgConfig.Datadog(), util.CreateSources(logSources)) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources(logSources)) files := fileProvider.FilesToTail(true, logSources) suite.Equal(suite.filesLimit, len(files)) suite.Equal([]string{"3 files tailed out of 5 files matching"}, logSources[0].Messages.GetMessages()) @@ -256,7 +256,7 @@ func (suite *ProviderTestSuite) TestAllWildcardPathsAreUpdated() { sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: fmt.Sprintf("%s/1/*.log", suite.testDir)}), sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: fmt.Sprintf("%s/2/*.log", suite.testDir)}), } - status.InitStatus(pkgConfig.Datadog(), util.CreateSources(logSources)) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources(logSources)) files := fileProvider.FilesToTail(true, logSources) suite.Equal(2, len(files)) suite.Equal([]string{"2 files tailed out of 3 files matching"}, logSources[0].Messages.GetMessages()) diff --git a/pkg/logs/launchers/integration/launcher.go b/pkg/logs/launchers/integration/launcher.go index 1d8a9b15f2079..59f99dae868e8 100644 --- a/pkg/logs/launchers/integration/launcher.go +++ b/pkg/logs/launchers/integration/launcher.go @@ -7,15 +7,18 @@ package integration import ( + "math" "os" "path/filepath" "strings" + "time" + "github.com/DataDog/datadog-agent/pkg/util/filesystem" ddLog "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/comp/logs/agent/config" integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/launchers" "github.com/DataDog/datadog-agent/pkg/logs/pipeline" @@ -34,35 +37,65 @@ type Launcher struct { stop chan struct{} runPath string integrationsLogsChan chan integrations.IntegrationLog - integrationToFile map[string]string + integrationToFile map[string]*fileInfo + fileSizeMax int64 + combinedUsageMax int64 + combinedUsageSize int64 // writeLogToFile is used as a function pointer, so it can be overridden in // testing to make deterministic tests - writeFunction func(logFilePath, log string) error + writeLogToFileFunction func(filepath, log string) error +} + +// fileInfo stores information about each file that is needed in order to keep +// track of the combined and overall disk usage by the logs files +type fileInfo struct { + filename string + lastModified time.Time + size int64 } // NewLauncher creates and returns an integrations launcher, and creates the // path for integrations files to run in func NewLauncher(sources *sources.LogSources, integrationsLogsComp integrations.Component) *Launcher { - runPath := filepath.Join(pkgConfig.Datadog().GetString("logs_config.run_path"), "integrations") + datadogConfig := pkgconfigsetup.Datadog() + runPath := 
filepath.Join(datadogConfig.GetString("logs_config.run_path"), "integrations") err := os.MkdirAll(runPath, 0755) + if err != nil { - ddLog.Warn("Unable to make integrations logs directory: ", err) - return nil + ddLog.Error("Unable to create integrations logs directory:", err) + } + + logsTotalUsageSetting := datadogConfig.GetInt64("logs_config.integrations_logs_total_usage") * 1024 * 1024 + logsUsageRatio := datadogConfig.GetFloat64("logs_config.integrations_logs_disk_ratio") + maxDiskUsage, err := computeMaxDiskUsage(runPath, logsTotalUsageSetting, logsUsageRatio) + if err != nil { + ddLog.Warn("Unable to compute integrations logs max disk usage, using default value of 100 MB:", err) + maxDiskUsage = logsTotalUsageSetting } return &Launcher{ sources: sources, runPath: runPath, + fileSizeMax: datadogConfig.GetInt64("logs_config.integrations_logs_files_max_size") * 1024 * 1024, + combinedUsageMax: maxDiskUsage, + combinedUsageSize: 0, stop: make(chan struct{}), integrationsLogsChan: integrationsLogsComp.Subscribe(), addedConfigs: integrationsLogsComp.SubscribeIntegration(), - integrationToFile: make(map[string]string), - writeFunction: writeLogToFile, + integrationToFile: make(map[string]*fileInfo), + // Set the initial least recently modified time to the largest possible + // value, used for the first comparison + writeLogToFileFunction: writeLogToFile, } } // Start starts the launcher and launches the run loop in a go function func (s *Launcher) Start(_ launchers.SourceProvider, _ pipeline.Provider, _ auditor.Registry, _ *tailers.TailerTracker) { + err := s.scanInitialFiles(s.runPath) + if err != nil { + ddLog.Warn("Unable to scan existing log files:", err) + } + go s.run() } @@ -76,67 +109,161 @@ func (s *Launcher) run() { for { select { case cfg := <-s.addedConfigs: + sources, err := ad.CreateSources(cfg.Config) if err != nil { - ddLog.Warn("Failed to create source ", err) + ddLog.Error("Failed to create source ", err) continue } for _, source := range sources { // TODO: integrations should only be allowed to have one IntegrationType config. 
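// Hypothetical worked example, not part of the patch: how the combinedUsageMax
// budget produced by computeMaxDiskUsage (called in NewLauncher above, defined
// later in this file) behaves. The configured total-usage setting is capped by
// whatever free space remains once (1 - integrations_logs_disk_ratio) of the
// volume is reserved for everything else. All numbers below are made up; uses
// the math package and the Go 1.21 min builtin.
func exampleCombinedUsageCap() int64 {
	const mb = int64(1024 * 1024)
	logsTotalUsageSetting := 100 * mb // integrations_logs_total_usage of 100 (MB), assumed
	usageRatio := 0.80                // integrations_logs_disk_ratio, assumed
	diskTotal := 100 * 1024 * mb      // hypothetical 100 GB volume
	diskFree := 30 * 1024 * mb        // hypothetical 30 GB currently available

	reserved := float64(diskTotal) * (1 - usageRatio) // ~20 GB kept off-limits
	budget := diskFree - int64(math.Ceil(reserved))   // ~10 GB usable for integrations logs
	return min(logsTotalUsageSetting, budget)         // the 100 MB setting wins here
}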
if source.Config.Type == config.IntegrationType { - logFilePath, err := s.createFile(cfg.IntegrationID) - if err != nil { - ddLog.Warn("Failed to create integration log file: ", err) - continue + // This check avoids duplicating files that have already been created + // by scanInitialFiles + logFile, exists := s.integrationToFile[cfg.IntegrationID] + + if !exists { + logFile, err = s.createFile(cfg.IntegrationID) + if err != nil { + ddLog.Error("Failed to create integration log file:", err) + continue + } + + // file to write the incoming logs to + s.integrationToFile[cfg.IntegrationID] = logFile } - filetypeSource := s.makeFileSource(source, logFilePath) - s.sources.AddSource(filetypeSource) - // file to write the incoming logs to - s.integrationToFile[cfg.IntegrationID] = logFilePath + filetypeSource := s.makeFileSource(source, logFile.filename) + s.sources.AddSource(filetypeSource) } } case log := <-s.integrationsLogsChan: - logFilePath := s.integrationToFile[log.IntegrationID] + s.receiveLogs(log) + case <-s.stop: + return + } + } +} - err := s.ensureFileSize(logFilePath) - if err != nil { - ddLog.Warn("Failed to get file size: ", err) - continue - } +// receiveLogs handles writing incoming logs to their respective file as well as +// enforcing size limitations +func (s *Launcher) receiveLogs(log integrations.IntegrationLog) { + fileToUpdate, exists := s.integrationToFile[log.IntegrationID] - err = s.writeFunction(logFilePath, log.Log) - if err != nil { - ddLog.Warn("Error writing log to file: ", err) - } - case <-s.stop: + if !exists { + ddLog.Warn("Failed to write log to file, file is nil for integration ID:", log.IntegrationID) + return + } + + // Ensure the individual file doesn't exceed integrations_logs_files_max_size + // Add 1 because we write the \n at the end as well + logSize := int64(len(log.Log)) + 1 + if fileToUpdate.size+logSize > s.fileSizeMax { + file, err := os.Create(fileToUpdate.filename) + if err != nil { + ddLog.Error("Failed to delete and remake oversize file:", err) return } + + s.combinedUsageSize -= fileToUpdate.size + + err = file.Close() + if err != nil { + ddLog.Warn("Failed to close file:", err) + } + + fileToUpdate.size = 0 + } + + // Ensure combined logs usage doesn't exceed integrations_logs_total_usage by + // deleting files until total usage falls below the set maximum + for s.combinedUsageSize+logSize > s.combinedUsageMax { + leastRecentlyModifiedFile := s.getLeastRecentlyModifiedFile() + + err := s.deleteFile(leastRecentlyModifiedFile) + if err != nil { + ddLog.Error("Error deleting log file:", err) + continue + } + + file, err := os.Create(leastRecentlyModifiedFile.filename) + if err != nil { + ddLog.Error("Error creating log file:", err) + continue + } + + err = file.Close() + if err != nil { + ddLog.Warn("Failed to close file:", err) + } + } + + err := s.writeLogToFileFunction(filepath.Join(s.runPath, fileToUpdate.filename), log.Log) + if err != nil { + ddLog.Warn("Error writing log to file:", err) + return } + + // Update information for the launcher and modified file + s.combinedUsageSize += logSize + fileToUpdate.lastModified = time.Now() + fileToUpdate.size += logSize } -// writeLogToFile is used as a function pointer +// deleteFile deletes the given file +func (s *Launcher) deleteFile(file *fileInfo) error { + filename := filepath.Join(s.runPath, file.filename) + err := os.Remove(filename) + if err != nil { + return err + } + ddLog.Info("Successfully deleted log file:", filename) + + s.combinedUsageSize -= file.size + + file.size = 0 + 
file.lastModified = time.Now() + + return nil +} + +// getLeastRecentlyModifiedFile returns the least recently modified file among +// all the files tracked by the integrations launcher +func (s *Launcher) getLeastRecentlyModifiedFile() *fileInfo { + leastRecentlyModifiedTime := time.Now() + var leastRecentlyModifiedFile *fileInfo + + for _, fileInfo := range s.integrationToFile { + if fileInfo.lastModified.Before(leastRecentlyModifiedTime) { + leastRecentlyModifiedFile = fileInfo + leastRecentlyModifiedTime = fileInfo.lastModified + } + } + + return leastRecentlyModifiedFile +} + +// writeLogToFile is used as a function pointer that writes a log to a given file func writeLogToFile(logFilePath, log string) error { file, err := os.OpenFile(logFilePath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) if err != nil { - ddLog.Warn("Failed to open file to write log to: ", err) + ddLog.Error("Failed to open file to write log to:", err) return err } - defer file.Close() - _, err = file.WriteString(log) if err != nil { - ddLog.Warn("Failed to write integration log to file: ", err) + ddLog.Warn("Failed to write integration log to file:", err) return err } if _, err = file.Write(endOfLine); err != nil { - ddLog.Warn("Failed to write integration log to file: ", err) + ddLog.Warn("Failed to write integration log to file:", err) return err } - return nil + + return file.Close() } // makeFileSource Turns an integrations source into a logsSource @@ -155,58 +282,104 @@ func (s *Launcher) makeFileSource(source *sources.LogSource, logFilePath string) return fileSource } -// TODO Change file naming to reflect ID once logs from go interfaces gets merged. // createFile creates a file for the logsource -func (s *Launcher) createFile(id string) (string, error) { - logFilePath := s.integrationLogFilePath(id) +func (s *Launcher) createFile(source string) (*fileInfo, error) { + filepath := s.integrationLogFilePath(source) - file, err := os.Create(logFilePath) + file, err := os.Create(filepath) if err != nil { - return "", nil + ddLog.Error("Error creating file for log source:", err) + return nil, err } - defer file.Close() + ddLog.Info("Successfully created integrations log file:", file.Name()) - return logFilePath, nil + err = file.Close() + if err != nil { + return nil, err + } + + fileInfo := &fileInfo{ + filename: filepath, + lastModified: time.Now(), + size: 0, + } + + return fileInfo, nil } // integrationLoglogFilePath returns a file path to use for an integration log file func (s *Launcher) integrationLogFilePath(id string) string { - fileName := strings.ReplaceAll(id, " ", "-") - fileName = strings.ReplaceAll(fileName, ":", "_") + ".log" + fileName := strings.ReplaceAll(id, ":", "_") + ".log" logFilePath := filepath.Join(s.runPath, fileName) return logFilePath } -// ensureFileSize enforces the max file size for files integrations logs -// files. Files over the set size will be deleted and remade. 
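// Illustrative round trip, not part of the patch: the naming scheme of
// integrationLogFilePath above and its inverse fileNameToID (defined later in
// this file) turn colons in an integration ID into underscores on disk, and map
// only the last underscore back to a colon when decoding. Uses strings; the
// function name is hypothetical.
func exampleIntegrationFileNaming() {
	id := "1234 5678:myIntegration"

	// What integrationLogFilePath produces before joining runPath:
	fileName := strings.ReplaceAll(id, ":", "_") + ".log" // "1234 5678_myIntegration.log"
	_ = fileName

	// Decoding restores the original ID because only the last "_" becomes ":":
	//   fileNameToID("1234 5678_myIntegration.log") == "1234 5678:myIntegration"
	// Note that an ID whose trailing segment itself contains an underscore would
	// not round-trip exactly.
}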
-func (s *Launcher) ensureFileSize(logFilePath string) error { - maxFileSizeSetting := pkgConfig.Datadog().GetInt64("logs_config.integrations_logs_files_max_size") - maxFileSizeBytes := maxFileSizeSetting * 1024 * 1024 - - fi, err := os.Stat(logFilePath) +// computerDiskUsageMax computes the max disk space the launcher can use based +// off the integrations_logs_disk_ratio and integrations_logs_total_usage +// settings +func computeMaxDiskUsage(runPath string, logsTotalUsageSetting int64, usageRatio float64) (int64, error) { + usage, err := filesystem.NewDisk().GetUsage(runPath) if err != nil { - return err + return 0, err } - if fi.Size() > int64(maxFileSizeBytes) { - err := os.Remove(logFilePath) + diskReserved := float64(usage.Total) * (1 - usageRatio) + diskAvailable := int64(usage.Available) - int64(math.Ceil(diskReserved)) + + return min(logsTotalUsageSetting, diskAvailable), nil +} + +// scanInitialFiles scans the run path for initial files and then adds them to +// be managed by the launcher +func (s *Launcher) scanInitialFiles(dir string) error { + err := filepath.Walk(dir, func(_ string, info os.FileInfo, err error) error { if err != nil { - if os.IsNotExist(err) { - ddLog.Warn("File does not exist, creating new one: ", err) - } else { - ddLog.Warn("Error deleting file: ", err) - } - } else { - ddLog.Info("Successfully deleted oversize log file, creating new one.") + return nil + } + + if info.IsDir() { + return nil + } + + fileInfo := &fileInfo{ + filename: info.Name(), + size: info.Size(), + lastModified: info.ModTime(), } - file, err := os.Create(logFilePath) + integrationID := fileNameToID(fileInfo.filename) + + s.integrationToFile[integrationID] = fileInfo + s.combinedUsageSize += info.Size() + + return nil + }) + + if err != nil { + return err + } + + for s.combinedUsageSize > s.combinedUsageMax { + leastRecentlyModifiedFile := s.getLeastRecentlyModifiedFile() + + err = s.deleteFile(leastRecentlyModifiedFile) if err != nil { - return err + ddLog.Warn("Error deleting log file:", err) + break } - defer file.Close() } - return nil + return err +} + +func fileNameToID(fileName string) string { + baseName := strings.TrimSuffix(filepath.Base(fileName), filepath.Ext(fileName)) + lastUnderscoreIndex := strings.LastIndex(baseName, "_") + if lastUnderscoreIndex == -1 { + return baseName + } + + integrationID := baseName[:lastUnderscoreIndex] + ":" + baseName[lastUnderscoreIndex+1:] + return integrationID } diff --git a/pkg/logs/launchers/integration/launcher_test.go b/pkg/logs/launchers/integration/launcher_test.go index 1eef9c6eadf3e..6b33664f070ac 100644 --- a/pkg/logs/launchers/integration/launcher_test.go +++ b/pkg/logs/launchers/integration/launcher_test.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/logs/agent/config" integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def" integrationsmock "github.com/DataDog/datadog-agent/comp/logs/integrations/mock" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/util" "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/logs/pipeline" @@ -47,13 +47,13 @@ func (suite *LauncherTestSuite) SetupTest() { suite.testPath = filepath.Join(suite.testDir, "logs_integration_test.log") suite.source = sources.NewLogSource(suite.T().Name(), &config.LogsConfig{Type: config.IntegrationType, Path: suite.testPath}) - // Override `logs_config.run_path` 
before calling `sources.NewLogSources()` as otherwise // it will try and create `/opt/datadog` directory and fail - pkgConfig.Datadog().SetWithoutSource("logs_config.run_path", suite.testDir) + pkgconfigsetup.Datadog().SetWithoutSource("logs_config.run_path", suite.testDir) suite.s = NewLauncher(sources.NewLogSources(), suite.integrationsComp) - status.InitStatus(pkgConfig.Datadog(), util.CreateSources([]*sources.LogSource{suite.source})) + suite.s.fileSizeMax = 10 * 1024 * 1024 + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources([]*sources.LogSource{suite.source})) suite.s.runPath = suite.testDir } @@ -75,7 +75,7 @@ func (suite *LauncherTestSuite) TestSendLog() { filepathChan := make(chan string) fileLogChan := make(chan string) - suite.s.writeFunction = func(logFilePath, log string) error { + suite.s.writeLogToFileFunction = func(logFilePath, log string) error { fileLogChan <- log filepathChan <- logFilePath return nil @@ -93,7 +93,7 @@ func (suite *LauncherTestSuite) TestSendLog() { assert.Equal(suite.T(), foundSource.Config.Type, config.FileType) assert.Equal(suite.T(), foundSource.Config.Source, "foo") assert.Equal(suite.T(), foundSource.Config.Service, "bar") - expectedPath := suite.s.integrationToFile[id] + expectedPath := filepath.Join(suite.s.runPath, suite.s.integrationToFile[id].filename) assert.Equal(suite.T(), logSample, <-fileLogChan) assert.Equal(suite.T(), expectedPath, <-filepathChan) @@ -101,7 +101,7 @@ func (suite *LauncherTestSuite) TestSendLog() { func (suite *LauncherTestSuite) TestWriteLogToFile() { logText := "hello world" - err := suite.s.writeFunction(suite.testPath, logText) + err := suite.s.writeLogToFileFunction(suite.testPath, logText) require.Nil(suite.T(), err) fileContents, err := os.ReadFile(suite.testPath) @@ -112,13 +112,13 @@ func (suite *LauncherTestSuite) TestWriteLogToFile() { func (suite *LauncherTestSuite) TestWriteMultipleLogsToFile() { var err error - err = suite.s.writeFunction(suite.testPath, "line 1") + err = suite.s.writeLogToFileFunction(suite.testPath, "line 1") require.Nil(suite.T(), err, "error writing line 1") - err = suite.s.writeFunction(suite.testPath, "line 2") + err = suite.s.writeLogToFileFunction(suite.testPath, "line 2") require.Nil(suite.T(), err, "error writing line 2") - err = suite.s.writeFunction(suite.testPath, "line 3") + err = suite.s.writeLogToFileFunction(suite.testPath, "line 3") require.Nil(suite.T(), err, "error writing line 3") fileContents, err := os.ReadFile(suite.testPath) @@ -128,6 +128,34 @@ func (suite *LauncherTestSuite) TestWriteMultipleLogsToFile() { assert.Equal(suite.T(), expectedContent, string(fileContents)) } +// TestDeleteFile tests that deleteFile properly deletes the correct file +func (suite *LauncherTestSuite) TestDeleteFile() { + filename := "testfile.log" + filepath := filepath.Join(suite.s.runPath, filename) + file, err := os.Create(filepath) + fileinfo := &fileInfo{filename: filename, size: int64(0)} + assert.Nil(suite.T(), err) + + info, err := os.Stat(filepath) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), int64(0), info.Size(), "Newly created file size not zero") + + // Write data the file and make sure ensureFileSize deletes the file for being too large + data := make([]byte, 2*1024*1024) + file.Write(data) + file.Close() + + info, err = os.Stat(filepath) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), int64(2*1024*1024), info.Size()) + + err = suite.s.deleteFile(fileinfo) + assert.Nil(suite.T(), err) + + _, err = os.Stat(filepath) + assert.True(suite.T(), 
os.IsNotExist(err)) +} + // TestIntegrationLogFilePath ensures the filepath for the logs files are correct func (suite *LauncherTestSuite) TestIntegrationLogFilePath() { id := "123456789" @@ -137,10 +165,296 @@ func (suite *LauncherTestSuite) TestIntegrationLogFilePath() { id = "1234 5678:myIntegration" actualFilePath = suite.s.integrationLogFilePath(id) - expectedFilePath = filepath.Join(suite.s.runPath, "1234-5678_myIntegration.log") + expectedFilePath = filepath.Join(suite.s.runPath, "1234 5678_myIntegration.log") assert.Equal(suite.T(), expectedFilePath, actualFilePath) } +// TestFileNameToID ensures file names are decoded to their proper id +func (suite *LauncherTestSuite) TestFileNameToID() { + tests := []struct { + input string + expected string + }{ + {"file_name_1234.log", "file_name:1234"}, + {"example_test_5678abcd.log", "example_test:5678abcd"}, + {"integration with spaces_5678.log", "integration with spaces:5678"}, + {"file_with_multiple_underscores_9999.log", "file_with_multiple_underscores:9999"}, + } + + for _, tt := range tests { + suite.T().Run(tt.input, func(_ *testing.T) { + result := fileNameToID(tt.input) + assert.Equal(suite.T(), tt.expected, result) + }) + } +} + +// TestFileExceedsSingleFileLimit ensures individual files cannot exceed file +// limit sizes +func (suite *LauncherTestSuite) TestFileExceedsSingleFileLimit() { + oneMB := int64(1 * 1024 * 1024) + suite.s.combinedUsageMax = 2 * oneMB + suite.s.fileSizeMax = oneMB + + filename := "sample_integration_123.log" + filepath := filepath.Join(suite.s.runPath, filename) + file, err := os.Create(filepath) + assert.Nil(suite.T(), err) + + file.Write(make([]byte, oneMB)) + file.Close() + + suite.s.Start(nil, nil, nil, nil) + + integrationLog := integrations.IntegrationLog{ + Log: "sample log", + IntegrationID: "sample_integration:123", + } + + suite.s.receiveLogs(integrationLog) + + assert.Equal(suite.T(), int64(len(integrationLog.Log)+1), suite.s.combinedUsageSize) + assert.Equal(suite.T(), int64(len(integrationLog.Log)+1), suite.s.integrationToFile["sample_integration:123"].size) + assert.Equal(suite.T(), 1, len(suite.s.integrationToFile)) +} + +// TestScanInitialFiles ensures files already present in the runPath for the +// launcher are detected and managed upon launcher start +func (suite *LauncherTestSuite) TestScanInitialFiles() { + filename := "sample_integration_123.log" + fileSize := int64(1 * 1024 * 1024) + + file, err := os.Create(filepath.Join(suite.s.runPath, filename)) + assert.Nil(suite.T(), err) + + data := make([]byte, fileSize) + file.Write(data) + file.Close() + + suite.s.scanInitialFiles(suite.s.runPath) + fileID := fileNameToID(filename) + actualFileInfo := suite.s.integrationToFile[fileID] + + assert.NotEmpty(suite.T(), suite.s.integrationToFile) + assert.Equal(suite.T(), actualFileInfo.filename, filename) + assert.Equal(suite.T(), fileSize, actualFileInfo.size) + assert.Equal(suite.T(), fileSize, suite.s.combinedUsageSize) +} + +// TestCreateFileAfterScanInitialFile ensures files tracked by scanInitialFiles +// are not created again after they've already been scanned +func (suite *LauncherTestSuite) TestCreateFileAfterScanInitialFile() { + filename := "sample_integration_123.log" + fileSize := int64(1 * 1024 * 1024) + + file, err := os.Create(filepath.Join(suite.s.runPath, filename)) + assert.Nil(suite.T(), err) + + data := make([]byte, fileSize) + file.Write(data) + file.Close() + + suite.s.scanInitialFiles(suite.s.runPath) + fileID := fileNameToID(filename) + scannedFile := 
suite.s.integrationToFile[fileID] + + assert.NotEmpty(suite.T(), suite.s.integrationToFile) + assert.Equal(suite.T(), filename, scannedFile.filename) + assert.Equal(suite.T(), fileSize, scannedFile.size) + assert.Equal(suite.T(), fileSize, suite.s.combinedUsageSize) + + mockConf := &integration.Config{} + mockConf.Provider = "container" + mockConf.LogsConfig = integration.Data(`[{"type": "integration", "source": "foo", "service": "bar"}]`) + + filepathChan := make(chan string) + fileLogChan := make(chan string) + suite.s.writeLogToFileFunction = func(logFilePath, log string) error { + fileLogChan <- log + filepathChan <- logFilePath + return nil + } + + suite.s.Start(nil, nil, nil, nil) + suite.integrationsComp.RegisterIntegration(fileID, *mockConf) + assert.Equal(suite.T(), 1, len(suite.s.integrationToFile)) + + logSample := "hello world" + suite.integrationsComp.SendLog(logSample, fileID) + + foundSource := suite.s.sources.GetSources()[0] + assert.Equal(suite.T(), foundSource.Config.Type, config.FileType) + assert.Equal(suite.T(), foundSource.Config.Source, "foo") + assert.Equal(suite.T(), foundSource.Config.Service, "bar") + + assert.Equal(suite.T(), logSample, <-fileLogChan) +} + +// TestSentLogExceedsTotalUsage ensures files are deleted when a sent log causes a +// disk usage overage +func (suite *LauncherTestSuite) TestSentLogExceedsTotalUsage() { + suite.s.combinedUsageMax = 3 * 1024 * 1024 + + filename1 := "sample_integration1_123.log" + filename2 := "sample_integration2_123.log" + filename3 := "sample_integration3_123.log" + + file1, err := os.Create(filepath.Join(suite.s.runPath, filename1)) + assert.Nil(suite.T(), err) + file2, err := os.Create(filepath.Join(suite.s.runPath, filename2)) + assert.Nil(suite.T(), err) + file3, err := os.Create(filepath.Join(suite.s.runPath, filename3)) + assert.Nil(suite.T(), err) + + dataOneMB := make([]byte, 1*1024*1024) + file1.Write(dataOneMB) + file2.Write(dataOneMB) + file3.Write(dataOneMB) + file1.Close() + file2.Close() + file3.Close() + + suite.s.Start(nil, nil, nil, nil) + + integrationLog := integrations.IntegrationLog{ + Log: "sample log", + IntegrationID: "sample_integration1:123", + } + + suite.s.receiveLogs(integrationLog) + + file1Stat, err := os.Stat(filepath.Join(suite.s.runPath, filename1)) + assert.Nil(suite.T(), err) + file2Stat, err := os.Stat(filepath.Join(suite.s.runPath, filename2)) + assert.Nil(suite.T(), err) + file3Stat, err := os.Stat(filepath.Join(suite.s.runPath, filename3)) + assert.Nil(suite.T(), err) + + actualSize := file1Stat.Size() + file2Stat.Size() + file3Stat.Size() + + assert.Equal(suite.T(), suite.s.combinedUsageSize, actualSize) + assert.Equal(suite.T(), suite.s.integrationToFile["sample_integration2:123"], suite.s.getLeastRecentlyModifiedFile()) +} + +// TestInitialLogsExceedTotalUsageMultipleFiles ensures initial files are deleted if they +// exceed total allowed disk space +func (suite *LauncherTestSuite) TestInitialLogsExceedTotalUsageMultipleFiles() { + oneMB := int64(1 * 1024 * 1024) + suite.s.combinedUsageMax = oneMB + + filename1 := "sample_integration1_123.log" + filename2 := "sample_integration2_123.log" + + dataOneMB := make([]byte, oneMB) + + file1, err := os.Create(filepath.Join(suite.s.runPath, filename1)) + assert.Nil(suite.T(), err) + file2, err := os.Create(filepath.Join(suite.s.runPath, filename2)) + assert.Nil(suite.T(), err) + + file1.Write(dataOneMB) + file2.Write(dataOneMB) + file1.Close() + file2.Close() + + suite.s.Start(nil, nil, nil, nil) + + assert.Equal(suite.T(), oneMB, 
suite.s.combinedUsageSize) + assert.Equal(suite.T(), 2, len(suite.s.integrationToFile)) +} + +// TestInitialLogExceedsTotalUsageSingleFile ensures an initial file won't +// exceed the total allowed disk usage space +func (suite *LauncherTestSuite) TestInitialLogExceedsTotalUsageSingleFile() { + oneMB := int64(1 * 1024 * 1024) + suite.s.combinedUsageMax = oneMB + + filename := "sample_integration1_123.log" + dataTwoMB := make([]byte, 2*oneMB) + + file, err := os.Create(filepath.Join(suite.s.runPath, filename)) + assert.Nil(suite.T(), err) + + file.Write(dataTwoMB) + file.Close() + + suite.s.Start(nil, nil, nil, nil) + + assert.Equal(suite.T(), int64(0), suite.s.combinedUsageSize) + assert.Equal(suite.T(), 1, len(suite.s.integrationToFile)) +} + +// TestScanInitialFilesDeletesProperly ensures the scanInitialFiles function +// properly deletes log files once the sum of sizes for the scanned files is too +// large +func (suite *LauncherTestSuite) TestScanInitialFilesDeletesProperly() { + err := os.RemoveAll(suite.s.runPath) + assert.Nil(suite.T(), err) + os.MkdirAll(suite.s.runPath, 0755) + assert.Nil(suite.T(), err) + + oneMB := int64(1 * 1024 * 1024) + suite.s.combinedUsageMax = oneMB + + filename1 := "sample_integration1_123.log" + filename2 := "sample_integration2_123.log" + + name := filepath.Join(suite.s.runPath, filename1) + file1, err := os.Create(name) + assert.Nil(suite.T(), err) + file2, err := os.Create(filepath.Join(suite.s.runPath, filename2)) + assert.Nil(suite.T(), err) + + dataOneMB := make([]byte, oneMB) + file1.Write(dataOneMB) + file2.Write(dataOneMB) + file1.Close() + file2.Close() + + suite.s.scanInitialFiles(suite.s.runPath) + + // make sure there is only one file in the directory + files, err := os.ReadDir(suite.s.runPath) + assert.Nil(suite.T(), err) + + fileCount := 0 + for _, file := range files { + if !file.IsDir() { + fileCount++ + } + } + + assert.Equal(suite.T(), 1, fileCount) +} + func TestLauncherTestSuite(t *testing.T) { suite.Run(t, new(LauncherTestSuite)) } + +// TestReadOnlyFileSystem ensures the launcher doesn't panic in a read-only +// file system. There will be errors but it should handle them gracefully. 
+func TestReadOnlyFileSystem(t *testing.T) { + readOnlyDir := filepath.Join(t.TempDir(), "readonly") + err := os.Mkdir(readOnlyDir, 0444) + assert.Nil(t, err, "Unable to make tempdir readonly") + + pkgconfigsetup.Datadog().SetWithoutSource("logs_config.run_path", readOnlyDir) + + integrationsComp := integrationsmock.Mock() + s := NewLauncher(sources.NewLogSources(), integrationsComp) + + // Check the launcher doesn't block on receiving channels + mockConf := &integration.Config{} + mockConf.Provider = "container" + mockConf.LogsConfig = integration.Data(`[{"type": "integration", "source": "foo", "service": "bar"}]`) + id := "123456789" + + s.Start(nil, nil, nil, nil) + integrationsComp.RegisterIntegration(id, *mockConf) + + logSample := "hello world" + integrationsComp.SendLog(logSample, id) + + // send a second log to make sure the launcher isn't blocking + integrationsComp.SendLog(logSample, id) +} diff --git a/pkg/logs/message/go.mod b/pkg/logs/message/go.mod index 9e25811e26e28..15447f630028a 100644 --- a/pkg/logs/message/go.mod +++ b/pkg/logs/message/go.mod @@ -15,7 +15,10 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils github.com/DataDog/datadog-agent/pkg/logs/sources => ../sources github.com/DataDog/datadog-agent/pkg/logs/status/utils => ../status/utils @@ -39,28 +42,31 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 github.com/stretchr/testify v1.9.0 ) require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - 
github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect @@ -80,7 +86,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -88,12 +94,12 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/logs/message/go.sum b/pkg/logs/message/go.sum index 77ba213060c82..765bdc23a7bf4 100644 --- a/pkg/logs/message/go.sum +++ b/pkg/logs/message/go.sum @@ -180,8 +180,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod 
h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -251,15 +252,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -295,8 +296,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -312,8 +313,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
-golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
+golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE=
+golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
diff --git a/pkg/logs/message/message.go b/pkg/logs/message/message.go
index bac1ade6202de..e852a97d70ae2 100644
--- a/pkg/logs/message/message.go
+++ b/pkg/logs/message/message.go
@@ -354,3 +354,13 @@ func (m *Message) Tags() []string {
 func (m *Message) TagsToString() string {
 	return m.Origin.TagsToString(m.ProcessingTags)
 }
+
+// TruncatedReasonTag returns a tag with the reason for truncation.
+func TruncatedReasonTag(reason string) string {
+	return fmt.Sprintf("truncated:%s", reason)
+}
+
+// MultiLineSourceTag returns a tag for multiline logs.
+func MultiLineSourceTag(source string) string {
+	return fmt.Sprintf("multiline:%s", source)
+}
diff --git a/pkg/logs/metrics/metrics.go b/pkg/logs/metrics/metrics.go
index 2ba51621ece48..0bee866c45c43 100644
--- a/pkg/logs/metrics/metrics.go
+++ b/pkg/logs/metrics/metrics.go
@@ -71,6 +71,9 @@ var (
 	//nolint:revive // TODO(AML) Fix revive linter
 	TlmDestinationHttpRespByStatusAndUrl = telemetry.NewCounter("logs", "destination_http_resp", []string{"status_code", "url"}, "Count of http responses by status code and destination url")
 
+	// TlmAutoMultilineAggregatorFlush Count of each line flushed from the auto multiline aggregator.
+	TlmAutoMultilineAggregatorFlush = telemetry.NewCounter("logs", "auto_multi_line_aggregator_flush", []string{"truncated", "line_type"}, "Count of each line flushed from the auto multiline aggregator")
+
 	// TlmLogsDiscardedFromSDSBuffer how many messages were dropped when waiting for an SDS configuration because the buffer is full
 	TlmLogsDiscardedFromSDSBuffer = telemetry.NewCounter("logs", "sds__dropped_from_buffer", nil, "Count of messages dropped from the buffer while waiting for an SDS configuration")
 )
diff --git a/pkg/logs/pipeline/go.mod b/pkg/logs/pipeline/go.mod
index 4a1f156e04cae..4248a782f0a22 100644
--- a/pkg/logs/pipeline/go.mod
+++ b/pkg/logs/pipeline/go.mod
@@ -17,7 +17,10 @@ replace (
 	github.com/DataDog/datadog-agent/pkg/config/env => ../../config/env
 	github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock
 	github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model
+	github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel
 	github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup
+	github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure
+	github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig
 	github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils
 	github.com/DataDog/datadog-agent/pkg/logs/auditor => ../auditor
 	github.com/DataDog/datadog-agent/pkg/logs/client => ../client
@@ -55,7 +58,7 @@ replace (
 require (
 	github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3
 	github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3
-	github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3
+	github.com/DataDog/datadog-agent/pkg/config/model v0.57.0
 	github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3
 	github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3
 	github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.56.0-rc.3
@@ -65,7 +68,7 @@ require (
 	github.com/DataDog/datadog-agent/pkg/logs/sender v0.56.0-rc.3
 	github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3
 	github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3
-	github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3
+	github.com/DataDog/datadog-agent/pkg/util/log v0.57.0
 	github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/stretchr/testify v1.9.0
@@ -74,30 +77,33 @@ require (
 	github.com/DataDog/agent-payload/v5 v5.0.106 // indirect
-	github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect
+	github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect
 	github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect
 	github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect
-	github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect
+	github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect
+	github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect
+	github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect
+	github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect
+	github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect
+
github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect github.com/DataDog/viper v1.13.5 // indirect @@ -132,7 +138,7 @@ require ( github.com/prometheus/procfs v0.11.1 // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect @@ -150,13 +156,13 @@ require ( go.uber.org/fx v1.22.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.28.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.29.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/tools v0.25.0 // indirect google.golang.org/protobuf v1.31.0 // indirect 
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/logs/pipeline/go.sum b/pkg/logs/pipeline/go.sum index 6d9b20fb4b0fa..82acf8c4755f3 100644 --- a/pkg/logs/pipeline/go.sum +++ b/pkg/logs/pipeline/go.sum @@ -1,44 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -dmitri.shuralyov.com/gpu/mtl 
v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/agent-payload/v5 v5.0.106 h1:A3dGX+JYoL7OJe2crpxznW7hWxLxhOk/17WbYskRWVk= github.com/DataDog/agent-payload/v5 v5.0.106/go.mod h1:COngtbYYCncpIPiE5D93QlXDH/3VAKk10jDNwGHcMRE= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= @@ -65,15 +27,9 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -88,11 +44,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/frankban/quicktest v1.14.4/go.mod 
h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= @@ -100,9 +52,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -122,68 +71,26 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod 
h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid 
v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -195,21 +102,15 @@ github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/U github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= @@ -219,7 +120,6 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -250,8 +150,6 @@ github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= @@ -300,8 +198,8 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -320,8 +218,6 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= @@ -339,19 +235,11 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi 
v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= go.opentelemetry.io/otel/exporters/prometheus v0.42.0 h1:jwV9iQdvp38fxXi8ZC+lNpxjK16MRcZlpDYvbuO1FiA= @@ -386,106 +274,42 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 
h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -493,61 +317,23 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -556,151 +342,30 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools 
v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 
h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= @@ -726,12 +391,5 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/pkg/logs/processor/go.mod b/pkg/logs/processor/go.mod index 24e11c8274253..e59fe22f6ce98 100644 --- a/pkg/logs/processor/go.mod +++ b/pkg/logs/processor/go.mod @@ -16,7 +16,10 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils github.com/DataDog/datadog-agent/pkg/logs/diagnostic => ../diagnostic github.com/DataDog/datadog-agent/pkg/logs/message => ../message @@ -47,37 +50,40 @@ require ( github.com/DataDog/agent-payload/v5 v5.0.106 github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sds v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 github.com/stretchr/testify v1.9.0 ) require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - 
github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect github.com/DataDog/viper v1.13.5 // indirect @@ -110,7 +116,7 @@ require ( github.com/prometheus/procfs v0.11.1 // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect @@ -129,12 +135,12 @@ require ( go.uber.org/fx v1.22.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + 
golang.org/x/tools v0.25.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/logs/processor/go.sum b/pkg/logs/processor/go.sum index 24e2f0deda878..3fdb44926c3fe 100644 --- a/pkg/logs/processor/go.sum +++ b/pkg/logs/processor/go.sum @@ -1,44 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod 
h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/agent-payload/v5 v5.0.106 h1:A3dGX+JYoL7OJe2crpxznW7hWxLxhOk/17WbYskRWVk= github.com/DataDog/agent-payload/v5 v5.0.106/go.mod h1:COngtbYYCncpIPiE5D93QlXDH/3VAKk10jDNwGHcMRE= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= @@ -63,15 +25,9 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -86,11 +42,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/frankban/quicktest v1.14.4 
h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= @@ -98,9 +50,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -120,68 +69,26 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod 
h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio 
v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -189,21 +96,15 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9G github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= @@ -213,7 +114,6 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt 
v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -244,8 +144,6 @@ github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= @@ -294,8 +192,8 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -314,8 +212,6 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= @@ -333,19 +229,11 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= go.opentelemetry.io/otel/exporters/prometheus v0.42.0 h1:jwV9iQdvp38fxXi8ZC+lNpxjK16MRcZlpDYvbuO1FiA= @@ -380,104 +268,40 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image 
v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -485,61 +309,23 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -548,151 +334,30 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod 
h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= @@ -718,12 +383,5 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/pkg/logs/schedulers/ad/scheduler.go b/pkg/logs/schedulers/ad/scheduler.go index 2de2f525df0bd..c9bc320d5b749 100644 --- a/pkg/logs/schedulers/ad/scheduler.go +++ b/pkg/logs/schedulers/ad/scheduler.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" logsConfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/util/adlistener" "github.com/DataDog/datadog-agent/pkg/logs/schedulers" "github.com/DataDog/datadog-agent/pkg/logs/service" @@ -153,7 +153,7 @@ func CreateSources(config integration.Config) ([]*sourcesPkg.LogSource, error) { // config attached to a container label or a pod annotation configs, err = logsConfig.ParseJSON(config.LogsConfig) case names.RemoteConfig: - if pkgconfig.Datadog().GetBool("remote_configuration.agent_integrations.allow_log_config_scheduling") { + if pkgconfigsetup.Datadog().GetBool("remote_configuration.agent_integrations.allow_log_config_scheduling") { // config supplied by remote config configs, err = logsConfig.ParseJSON(config.LogsConfig) } else { diff --git a/pkg/logs/schedulers/ad/scheduler_test.go b/pkg/logs/schedulers/ad/scheduler_test.go index 477c29b7af2a6..669a1a8a21d4b 100644 --- a/pkg/logs/schedulers/ad/scheduler_test.go +++ b/pkg/logs/schedulers/ad/scheduler_test.go @@ -9,9 +9,9 @@ import ( "fmt" "testing" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -213,7 +213,7 @@ func TestIgnoreRemoteConfigIfDisabled(t *testing.T) { ClusterCheck: false, } configmock.New(t) - pkgconfig.Datadog().Set("remote_configuration.agent_integrations.allow_log_config_scheduling", rcLogCfgSchedEnabled, model.SourceFile) + pkgconfigsetup.Datadog().Set("remote_configuration.agent_integrations.allow_log_config_scheduling", rcLogCfgSchedEnabled, model.SourceFile) scheduler.Schedule([]integration.Config{configSource}) if rcLogCfgSchedEnabled { require.Equal(t, 1, len(spy.Events)) diff --git a/pkg/logs/schedulers/cca/scheduler.go b/pkg/logs/schedulers/cca/scheduler.go index fe54e99cd11b1..5d7c68f082763 100644 --- a/pkg/logs/schedulers/cca/scheduler.go +++ b/pkg/logs/schedulers/cca/scheduler.go @@ -11,7 +11,7 @@ import ( 
"github.com/DataDog/datadog-agent/comp/core/autodiscovery" logsConfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/schedulers" "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -37,7 +37,7 @@ func New(ac autodiscovery.Component) schedulers.Scheduler { // Start implements schedulers.Scheduler#Start. func (s *Scheduler) Start(sourceMgr schedulers.SourceManager) { - if !coreConfig.Datadog().GetBool("logs_config.container_collect_all") { + if !pkgconfigsetup.Datadog().GetBool("logs_config.container_collect_all") { return } // source to collect all logs from all containers @@ -52,7 +52,7 @@ func (s *Scheduler) Start(sourceMgr schedulers.SourceManager) { // a hack! go func() { s.blockUntilAutoConfigRanOnce( - time.Millisecond * time.Duration(coreConfig.Datadog().GetInt("ac_load_timeout"))) + time.Millisecond * time.Duration(pkgconfigsetup.Datadog().GetInt("ac_load_timeout"))) log.Debug("Adding ContainerCollectAll source to the Logs Agent") sourceMgr.AddSource(source) close(s.added) diff --git a/pkg/logs/sds/go.mod b/pkg/logs/sds/go.mod index 03e37c5ec16da..7601127ccdb93 100644 --- a/pkg/logs/sds/go.mod +++ b/pkg/logs/sds/go.mod @@ -17,7 +17,10 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils github.com/DataDog/datadog-agent/pkg/logs/client => ../client github.com/DataDog/datadog-agent/pkg/logs/message => ../message @@ -46,36 +49,39 @@ replace ( ) require ( - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 github.com/stretchr/testify v1.9.0 ) require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel 
v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect @@ -106,7 +112,7 @@ require ( github.com/prometheus/procfs v0.11.1 // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect @@ -125,12 +131,12 @@ require ( go.uber.org/fx v1.22.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/logs/sds/go.sum b/pkg/logs/sds/go.sum index 00f5c6c8590cf..70db51e8fe320 100644 --- a/pkg/logs/sds/go.sum +++ b/pkg/logs/sds/go.sum @@ -1,44 +1,6 @@ 
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42/go.mod h1:TX7CTOQ3LbQjfAi4SwqUoR5gY1zfUk7VRBDTuArjaDc= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= @@ -61,15 +23,9 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -84,11 +40,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= @@ -96,9 +48,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= 
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -116,68 +65,26 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod 
h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= 
-github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -185,21 +92,15 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9G github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= @@ -208,7 +109,6 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -239,8 +139,6 @@ github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181 
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= @@ -289,8 +187,8 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -309,8 +207,6 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= @@ -328,19 +224,9 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= 
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= go.opentelemetry.io/otel/exporters/prometheus v0.42.0 h1:jwV9iQdvp38fxXi8ZC+lNpxjK16MRcZlpDYvbuO1FiA= @@ -375,104 +261,34 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod 
h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 
v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -480,212 +296,47 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto 
v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod 
h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= @@ -711,12 +362,5 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= 
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/pkg/logs/sds/rules.go b/pkg/logs/sds/rules.go
index a5e32fe903cfa..43a3af4ac4fb5 100644
--- a/pkg/logs/sds/rules.go
+++ b/pkg/logs/sds/rules.go
@@ -50,7 +50,9 @@ type StandardRulesConfig struct {
 // StandardRulesDefaults contains consts defaults information for
 // standard rules.
 type StandardRulesDefaults struct {
-    IncludedKeywordsCharCount uint32 `json:"included_keywords_char_count"`
+    IncludedKeywordsCharCount uint32   `json:"included_keywords_char_count"`
+    ExcludedKeywordsCharCount uint32   `json:"excluded_keywords_char_count"`
+    ExcludedKeywords          []string `json:"excluded_keywords"`
 }
 
 // RuleConfig of rule as sent by the Remote Configuration.
@@ -67,8 +69,9 @@ type RuleConfig struct {
 
 // ProximityKeywords definition in RC config.
 type ProximityKeywords struct {
-    Keywords       []string `json:"keywords"`
-    CharacterCount uint32   `json:"character_count"`
+    Keywords               []string `json:"keywords"`
+    CharacterCount         uint32   `json:"character_count"`
+    UseRecommendedKeywords bool     `json:"use_recommended_keywords"`
 }
 
 // RuleDefinition definition in RC config.
diff --git a/pkg/logs/sds/scanner.go b/pkg/logs/sds/scanner.go
index c8f726b38a18d..581fe810a7fbb 100644
--- a/pkg/logs/sds/scanner.go
+++ b/pkg/logs/sds/scanner.go
@@ -14,6 +14,7 @@ import (
     "strconv"
     "strings"
     "sync"
+    "time"
 
     "github.com/DataDog/datadog-agent/pkg/logs/message"
     "github.com/DataDog/datadog-agent/pkg/telemetry"
@@ -32,6 +33,8 @@ var (
         "Count of SDS reconfiguration error.", telemetry.Options{DefaultMetric: true})
     tlmSDSReconfigSuccess = telemetry.NewCounterWithOpts("sds", "reconfiguration_success", []string{"pipeline", "type"},
         "Count of SDS reconfiguration success.", telemetry.Options{DefaultMetric: true})
+    tlmSDSProcessingLatency = telemetry.NewSimpleHistogram("sds", "processing_latency", "Processing latency histogram",
+        []float64{10, 250, 500, 2000, 5000, 10000}) // unit: us
 )
 
 // Scanner wraps an SDS Scanner implementation, adds reconfiguration
@@ -284,7 +287,6 @@ func interpretRCRule(userRule RuleConfig, standardRule StandardRuleConfig, defau
     reqCapabilitiesCount := len(stdRuleDef.RequiredCapabilities)
     if reqCapabilitiesCount > 0 {
         if reqCapabilitiesCount > 1 {
-            // TODO(remy): telemetry
             log.Warnf("Standard rule '%v' with multiple required capabilities: %d. Only the first one will be used", standardRule.Name, reqCapabilitiesCount)
         }
         received := stdRuleDef.RequiredCapabilities[0]
@@ -307,20 +309,27 @@ func interpretRCRule(userRule RuleConfig, standardRule StandardRuleConfig, defau
     }
 
     if defToUse.Version == -1 {
-        // TODO(remy): telemetry
         return nil, fmt.Errorf("unsupported rule with no compatible definition")
     }
 
-    // we use the filled `CharacterCount` value to decide if we want
-    // to use the user provided configuration for proximity keywords
-    // or if we have to use the information provided in the std rules instead.
-    if userRule.IncludedKeywords.CharacterCount > 0 {
-        // proximity keywords configuration provided by the user
-        extraConfig.ProximityKeywords = sds.CreateProximityKeywordsConfig(userRule.IncludedKeywords.CharacterCount, userRule.IncludedKeywords.Keywords, nil)
-    } else if len(defToUse.DefaultIncludedKeywords) > 0 && defaults.IncludedKeywordsCharCount > 0 {
-        // the user has not specified proximity keywords
-        // use the proximity keywords provided by the standard rule if any
+    // If the "Use recommended keywords" checkbox has been checked, we use the default
+    // included keywords available in the rule (curated by Datadog).
+    // Otherwise:
+    // If some included keywords have been manually filled by the user, we use them
+    // Else we start using the default excluded keywords.
+    if userRule.IncludedKeywords.UseRecommendedKeywords {
+        // default included keywords
         extraConfig.ProximityKeywords = sds.CreateProximityKeywordsConfig(defaults.IncludedKeywordsCharCount, defToUse.DefaultIncludedKeywords, nil)
+    } else {
+        if len(userRule.IncludedKeywords.Keywords) > 0 && userRule.IncludedKeywords.CharacterCount > 0 {
+            // user provided included keywords
+            extraConfig.ProximityKeywords = sds.CreateProximityKeywordsConfig(userRule.IncludedKeywords.CharacterCount, userRule.IncludedKeywords.Keywords, nil)
+        } else if len(defaults.ExcludedKeywords) > 0 && defaults.ExcludedKeywordsCharCount > 0 {
+            // default excluded keywords
+            extraConfig.ProximityKeywords = sds.CreateProximityKeywordsConfig(defaults.ExcludedKeywordsCharCount, nil, defaults.ExcludedKeywords)
+        } else {
+            log.Warn("not using the recommended keywords but no keywords available for rule", userRule.Name)
+        }
     }
 
     // we've compiled all necessary information merging the standard rule and the user config
@@ -358,6 +367,7 @@ func interpretRCRule(userRule RuleConfig, standardRule StandardRuleConfig, defau
 func (s *Scanner) Scan(event []byte, msg *message.Message) (bool, []byte, error) {
     s.Lock()
     defer s.Unlock()
+    start := time.Now()
 
     if s.Scanner == nil {
         return false, nil, fmt.Errorf("can't Scan with an unitialized scanner")
@@ -378,6 +388,7 @@ func (s *Scanner) Scan(event []byte, msg *message.Message) (bool, []byte, error)
     // using a tag.
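The interpretRCRule hunk above replaces the old CharacterCount-based check with a three-way precedence for proximity keywords. A minimal, runnable Go sketch of that precedence, using simplified stand-in types rather than the agent's own (choose and both struct names below are hypothetical):

package main

import "fmt"

// Simplified stand-ins for the RC config types touched by this patch
// (field subset only; not the agent's actual definitions).
type proximityKeywords struct {
	Keywords               []string
	CharacterCount         uint32
	UseRecommendedKeywords bool
}

type standardDefaults struct {
	IncludedKeywordsCharCount uint32
	ExcludedKeywordsCharCount uint32
	ExcludedKeywords          []string
}

// choose mirrors the decision order above:
// 1. "use recommended keywords" -> the standard rule's curated included keywords,
// 2. otherwise included keywords the user filled in manually,
// 3. otherwise the default excluded keywords, if any are defined.
func choose(user proximityKeywords, ruleIncluded []string, defaults standardDefaults) (charCount uint32, included, excluded []string, ok bool) {
	switch {
	case user.UseRecommendedKeywords:
		return defaults.IncludedKeywordsCharCount, ruleIncluded, nil, true
	case len(user.Keywords) > 0 && user.CharacterCount > 0:
		return user.CharacterCount, user.Keywords, nil, true
	case len(defaults.ExcludedKeywords) > 0 && defaults.ExcludedKeywordsCharCount > 0:
		return defaults.ExcludedKeywordsCharCount, nil, defaults.ExcludedKeywords, true
	default:
		return 0, nil, nil, false // nothing usable; the scanner only logs a warning here
	}
}

func main() {
	defaults := standardDefaults{IncludedKeywordsCharCount: 10, ExcludedKeywordsCharCount: 10, ExcludedKeywords: []string{"trace-id"}}
	// No manual keywords and the recommended ones disabled: fall back to excluded keywords.
	n, inc, exc, ok := choose(proximityKeywords{}, []string{"credit", "card"}, defaults)
	fmt.Println(n, inc, exc, ok) // 10 [] [trace-id] true
}

With neither the recommended keywords enabled nor manual keywords filled in, the fallback is the standard rule's excluded-keyword defaults, which matches the new else-if branch in the patch; when even those are empty, the scanner only emits a warning.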
msg.ProcessingTags = append(msg.ProcessingTags, ScannedTag) + tlmSDSProcessingLatency.Observe(float64(time.Since(start) / 1000)) return scanResult.Mutated, scanResult.Event, err } diff --git a/pkg/logs/sds/scanner_test.go b/pkg/logs/sds/scanner_test.go index 3ccc85fe7be4b..bf27ea97ae8e0 100644 --- a/pkg/logs/sds/scanner_test.go +++ b/pkg/logs/sds/scanner_test.go @@ -572,6 +572,12 @@ func TestCloseCycleScan(t *testing.T) { func TestInterpretRC(t *testing.T) { require := require.New(t) + defaults := StandardRulesDefaults{ + IncludedKeywordsCharCount: 10, + ExcludedKeywordsCharCount: 10, + ExcludedKeywords: []string{"trace-id"}, + } + stdRc := StandardRuleConfig{ ID: "0", Name: "Zero", @@ -593,9 +599,12 @@ func TestInterpretRC(t *testing.T) { Type: matchActionRCRedact, Placeholder: "[redacted]", }, + IncludedKeywords: ProximityKeywords{ + UseRecommendedKeywords: true, + }, } - rule, err := interpretRCRule(rc, stdRc, StandardRulesDefaults{}) + rule, err := interpretRCRule(rc, stdRc, defaults) require.NoError(err) rxRule, ok := rule.(sds.RegexRuleConfig) require.True(ok) @@ -611,7 +620,7 @@ func TestInterpretRC(t *testing.T) { RequiredCapabilities: []string{RCSecondaryValidationLuhnChecksum}, }) - rule, err = interpretRCRule(rc, stdRc, StandardRulesDefaults{}) + rule, err = interpretRCRule(rc, stdRc, defaults) require.NoError(err) rxRule, ok = rule.(sds.RegexRuleConfig) require.True(ok) @@ -641,7 +650,7 @@ func TestInterpretRC(t *testing.T) { }, } - rule, err = interpretRCRule(rc, stdRc, StandardRulesDefaults{}) + rule, err = interpretRCRule(rc, stdRc, defaults) require.NoError(err) rxRule, ok = rule.(sds.RegexRuleConfig) require.True(ok) @@ -650,8 +659,11 @@ func TestInterpretRC(t *testing.T) { require.Equal(rxRule.Pattern, "second pattern") require.Equal(rxRule.SecondaryValidator, sds.LuhnChecksum) + // included keywords + // ----------------- + // make sure we use the keywords proximity feature if any's configured - // in the std rule definition stdRc.Definitions = []StandardRuleDefinition{ + // in the std rule definition stdRc.Definitions = []StandardRuleDefinition{ { Version: 2, @@ -666,7 +678,7 @@ func TestInterpretRC(t *testing.T) { }, } - rule, err = interpretRCRule(rc, stdRc, StandardRulesDefaults{IncludedKeywordsCharCount: 10}) + rule, err = interpretRCRule(rc, stdRc, defaults) require.NoError(err) rxRule, ok = rule.(sds.RegexRuleConfig) require.True(ok) @@ -681,11 +693,12 @@ func TestInterpretRC(t *testing.T) { // make sure we use the user provided information first // even if there is some in the std rule rc.IncludedKeywords = ProximityKeywords{ - Keywords: []string{"custom"}, - CharacterCount: 42, + Keywords: []string{"custom"}, + CharacterCount: 42, + UseRecommendedKeywords: false, } - rule, err = interpretRCRule(rc, stdRc, StandardRulesDefaults{IncludedKeywordsCharCount: 10}) + rule, err = interpretRCRule(rc, stdRc, defaults) require.NoError(err) rxRule, ok = rule.(sds.RegexRuleConfig) require.True(ok) @@ -696,4 +709,42 @@ func TestInterpretRC(t *testing.T) { require.NotNil(rxRule.ProximityKeywords) require.Equal(rxRule.ProximityKeywords.LookAheadCharacterCount, uint32(42)) require.Equal(rxRule.ProximityKeywords.IncludedKeywords, []string{"custom"}) + + // excluded keywords + // ----------------- + + // make sure we use the user provided information first + // even if there is some in the std rule + rc.IncludedKeywords = ProximityKeywords{ + Keywords: nil, + CharacterCount: 0, + UseRecommendedKeywords: false, + } + + // make sure we use the keywords proximity feature if any's 
configured + // in the std rule definition, here the excluded keywords one + stdRc.Definitions = []StandardRuleDefinition{ + { + Version: 2, + Pattern: "second pattern", + RequiredCapabilities: []string{RCSecondaryValidationLuhnChecksum}, + }, + { + Version: 1, + Pattern: "first pattern", + RequiredCapabilities: nil, + }, + } + + rule, err = interpretRCRule(rc, stdRc, defaults) + require.NoError(err) + rxRule, ok = rule.(sds.RegexRuleConfig) + require.True(ok) + + require.Equal(rxRule.Id, "Zero") + require.Equal(rxRule.Pattern, "second pattern") + require.Equal(rxRule.SecondaryValidator, sds.LuhnChecksum) + require.NotNil(rxRule.ProximityKeywords) + require.Equal(rxRule.ProximityKeywords.LookAheadCharacterCount, uint32(10)) + require.Equal(rxRule.ProximityKeywords.ExcludedKeywords, []string{"trace-id"}) } diff --git a/pkg/logs/sender/go.mod b/pkg/logs/sender/go.mod index a53ad5a116d7d..e45f718956ac4 100644 --- a/pkg/logs/sender/go.mod +++ b/pkg/logs/sender/go.mod @@ -17,7 +17,10 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils github.com/DataDog/datadog-agent/pkg/logs/client => ../client github.com/DataDog/datadog-agent/pkg/logs/message => ../message @@ -47,40 +50,43 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 github.com/benbjohnson/clock v1.3.5 github.com/stretchr/testify v1.9.0 ) require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig 
v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect @@ -111,7 +117,7 @@ require ( github.com/prometheus/procfs v0.11.1 // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect @@ -130,13 +136,13 @@ require ( go.uber.org/fx v1.22.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.28.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.29.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/tools v0.25.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/logs/sender/go.sum b/pkg/logs/sender/go.sum index 9b6cd83d632fb..23e63718ff3ed 100644 --- a/pkg/logs/sender/go.sum +++ b/pkg/logs/sender/go.sum @@ -1,44 +1,6 @@ 
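For context on the rules.go change earlier in this patch, here is a small sketch of how the new JSON fields could be decoded. The envelope struct and the payload are invented for illustration; only the field names and json tags mirror the patch:

package main

import (
	"encoding/json"
	"fmt"
)

// Field subset mirroring the json tags added in pkg/logs/sds/rules.go above.
type proximityKeywords struct {
	Keywords               []string `json:"keywords"`
	CharacterCount         uint32   `json:"character_count"`
	UseRecommendedKeywords bool     `json:"use_recommended_keywords"`
}

type standardRulesDefaults struct {
	IncludedKeywordsCharCount uint32   `json:"included_keywords_char_count"`
	ExcludedKeywordsCharCount uint32   `json:"excluded_keywords_char_count"`
	ExcludedKeywords          []string `json:"excluded_keywords"`
}

func main() {
	// Made-up payload, not an actual Remote Configuration message.
	payload := []byte(`{
		"defaults": {
			"included_keywords_char_count": 10,
			"excluded_keywords_char_count": 10,
			"excluded_keywords": ["trace-id"]
		},
		"included_keywords": {"use_recommended_keywords": true}
	}`)

	var cfg struct {
		Defaults         standardRulesDefaults `json:"defaults"`
		IncludedKeywords proximityKeywords     `json:"included_keywords"`
	}
	if err := json.Unmarshal(payload, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Defaults.ExcludedKeywords, cfg.IncludedKeywords.UseRecommendedKeywords) // [trace-id] true
}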
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= @@ -61,15 +23,9 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -84,11 +40,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= @@ -96,9 +48,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml 
v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -116,68 +65,26 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 
h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod 
h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -185,21 +92,15 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9G github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= @@ -208,7 +109,6 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -239,8 +139,6 @@ github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= 
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= @@ -289,8 +187,8 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -309,8 +207,6 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= @@ -328,19 +224,9 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= 
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= go.opentelemetry.io/otel/exporters/prometheus v0.42.0 h1:jwV9iQdvp38fxXi8ZC+lNpxjK16MRcZlpDYvbuO1FiA= @@ -375,106 +261,36 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod 
h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod 
h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 
v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -482,212 +298,47 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= 
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= 
-golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= @@ -713,12 +364,5 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/pkg/logs/sources/go.mod b/pkg/logs/sources/go.mod index aca5605c063da..6638152eb9698 100644 --- a/pkg/logs/sources/go.mod +++ b/pkg/logs/sources/go.mod @@ -15,7 +15,10 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ../../config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils github.com/DataDog/datadog-agent/pkg/logs/status/utils => ../status/utils github.com/DataDog/datadog-agent/pkg/telemetry => ../../telemetry @@ -38,27 +41,30 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 github.com/stretchr/testify v1.9.0 ) require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - 
github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect @@ -78,7 +84,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -86,12 +92,12 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/logs/sources/go.sum b/pkg/logs/sources/go.sum index 77ba213060c82..765bdc23a7bf4 100644 --- a/pkg/logs/sources/go.sum +++ b/pkg/logs/sources/go.sum @@ -180,8 +180,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod 
h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -251,15 +252,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -295,8 +296,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -312,8 +313,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= 
+golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/pkg/logs/status/status_test.go b/pkg/logs/status/status_test.go index a8465583df008..d1b3904aa5ffd 100644 --- a/pkg/logs/status/status_test.go +++ b/pkg/logs/status/status_test.go @@ -14,14 +14,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/metrics" "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/logs/util/testutils" ) func initStatus() { - InitStatus(pkgConfig.Datadog(), testutils.CreateSources([]*sources.LogSource{ + InitStatus(pkgconfigsetup.Datadog(), testutils.CreateSources([]*sources.LogSource{ sources.NewLogSource("foo", &config.LogsConfig{Type: "foo"}), sources.NewLogSource("bar", &config.LogsConfig{Type: "foo"}), sources.NewLogSource("foo", &config.LogsConfig{Type: "foo"}), diff --git a/pkg/logs/status/test_utils.go b/pkg/logs/status/test_utils.go index 0c3d6ee515605..78c248850f891 100644 --- a/pkg/logs/status/test_utils.go +++ b/pkg/logs/status/test_utils.go @@ -11,14 +11,14 @@ import ( "go.uber.org/atomic" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/logs/metrics" "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/logs/tailers" ) // InitStatus initialize a status builder -func InitStatus(coreConfig pkgConfig.Reader, sources *sources.LogSources) { +func InitStatus(coreConfig model.Reader, sources *sources.LogSources) { var isRunning = atomic.NewUint32(StatusRunning) tracker := tailers.NewTailerTracker() endpoints, _ := config.BuildEndpoints(coreConfig, config.HTTPConnectivityFailure, "test-track", "test-proto", "test-source") diff --git a/pkg/logs/tailers/file/tailer.go b/pkg/logs/tailers/file/tailer.go index 9f76f70df132e..d0637eaf05e11 100644 --- a/pkg/logs/tailers/file/tailer.go +++ b/pkg/logs/tailers/file/tailer.go @@ -20,7 +20,7 @@ import ( "github.com/benbjohnson/clock" "github.com/DataDog/datadog-agent/comp/core/tagger/types" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/logs/internal/decoder" @@ -149,8 +149,8 @@ func NewTailer(opts *TailerOptions) *Tailer { } forwardContext, stopForward := context.WithCancel(context.Background()) - closeTimeout := coreConfig.Datadog().GetDuration("logs_config.close_timeout") * time.Second - windowsOpenFileTimeout := coreConfig.Datadog().GetDuration("logs_config.windows_open_file_timeout") * time.Second + closeTimeout := pkgconfigsetup.Datadog().GetDuration("logs_config.close_timeout") * time.Second + windowsOpenFileTimeout := pkgconfigsetup.Datadog().GetDuration("logs_config.windows_open_file_timeout") * time.Second bytesRead := 
status.NewCountInfo("Bytes Read") fileRotated := opts.Rotated diff --git a/pkg/logs/tailers/file/tailer_test.go b/pkg/logs/tailers/file/tailer_test.go index affdeb992ab65..10ac08d382d73 100644 --- a/pkg/logs/tailers/file/tailer_test.go +++ b/pkg/logs/tailers/file/tailer_test.go @@ -18,7 +18,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/decoder" "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/logs/sources" @@ -105,7 +105,7 @@ func (suite *TailerTestSuite) TestTialerTimeDurationConfig() { // To satisfy the suite level tailer suite.tailer.StartFromBeginning() - coreConfig.Datadog().SetWithoutSource("logs_config.close_timeout", 42) + pkgconfigsetup.Datadog().SetWithoutSource("logs_config.close_timeout", 42) sleepDuration := 10 * time.Millisecond info := status.NewInfoRegistry() @@ -352,10 +352,10 @@ func (suite *TailerTestSuite) TestBuildTagsFileDir() { } func (suite *TailerTestSuite) TestTruncatedTag() { - coreConfig.Datadog().SetWithoutSource("logs_config.max_message_size_bytes", 3) - coreConfig.Datadog().SetWithoutSource("logs_config.tag_truncated_logs", true) - defer coreConfig.Datadog().SetWithoutSource("logs_config.max_message_size_bytes", coreConfig.DefaultMaxMessageSizeBytes) - defer coreConfig.Datadog().SetWithoutSource("logs_config.tag_truncated_logs", false) + pkgconfigsetup.Datadog().SetWithoutSource("logs_config.max_message_size_bytes", 3) + pkgconfigsetup.Datadog().SetWithoutSource("logs_config.tag_truncated_logs", true) + defer pkgconfigsetup.Datadog().SetWithoutSource("logs_config.max_message_size_bytes", pkgconfigsetup.DefaultMaxMessageSizeBytes) + defer pkgconfigsetup.Datadog().SetWithoutSource("logs_config.tag_truncated_logs", false) source := sources.NewLogSource("", &config.LogsConfig{ Type: config.FileType, @@ -380,7 +380,7 @@ func (suite *TailerTestSuite) TestTruncatedTag() { msg := <-suite.outputChan tags := msg.Tags() - suite.Contains(tags, message.TruncatedTag) + suite.Contains(tags, message.TruncatedReasonTag("single_line")) } func (suite *TailerTestSuite) TestMutliLineAutoDetect() { diff --git a/pkg/logs/tailers/socket/tailer.go b/pkg/logs/tailers/socket/tailer.go index a2c791997f2df..ad28d251058d1 100644 --- a/pkg/logs/tailers/socket/tailer.go +++ b/pkg/logs/tailers/socket/tailer.go @@ -12,7 +12,7 @@ import ( "net" "strings" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/decoder" "github.com/DataDog/datadog-agent/pkg/logs/internal/parsers/noop" "github.com/DataDog/datadog-agent/pkg/logs/message" @@ -100,7 +100,7 @@ func (t *Tailer) readForever() { } copiedTags := make([]string, len(t.source.Config.Tags)) copy(copiedTags, t.source.Config.Tags) - if ipAddress != "" && coreConfig.Datadog().GetBool("logs_config.use_sourcehost_tag") { + if ipAddress != "" && pkgconfigsetup.Datadog().GetBool("logs_config.use_sourcehost_tag") { lastColonIndex := strings.LastIndex(ipAddress, ":") var ipAddressWithoutPort string if lastColonIndex != -1 { diff --git a/pkg/logs/util/testutils/go.mod b/pkg/logs/util/testutils/go.mod index 2e1a8cc78da2c..12c5ba3229626 100644 --- a/pkg/logs/util/testutils/go.mod +++ b/pkg/logs/util/testutils/go.mod @@ -16,7 +16,10 @@ replace ( 
github.com/DataDog/datadog-agent/pkg/config/env => ../../../config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../../pkg/config/structure + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../../../config/utils github.com/DataDog/datadog-agent/pkg/logs/sources => ../../sources github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface => ../../status/statusinterface @@ -41,25 +44,28 @@ replace ( require github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect 
- github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect @@ -77,7 +83,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -85,11 +91,11 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/pkg/logs/util/testutils/go.sum b/pkg/logs/util/testutils/go.sum index 77ba213060c82..765bdc23a7bf4 100644 --- a/pkg/logs/util/testutils/go.sum +++ b/pkg/logs/util/testutils/go.sum @@ -180,8 +180,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -251,15 +252,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod 
h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -295,8 +296,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -312,8 +313,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/pkg/metrics/go.mod b/pkg/metrics/go.mod index 9ddbec9712b46..c9baa39e3633a 100644 --- a/pkg/metrics/go.mod +++ b/pkg/metrics/go.mod @@ -27,7 +27,7 @@ require ( github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 - 
github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.14.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 github.com/stretchr/testify v1.9.0 go.uber.org/atomic v1.11.0 ) diff --git a/pkg/metrics/go.sum b/pkg/metrics/go.sum index a8bc0739ad6c7..8ba22b6a12d97 100644 --- a/pkg/metrics/go.sum +++ b/pkg/metrics/go.sum @@ -39,10 +39,10 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.14.0 h1:J0IEqkrB8BjtuDHofR8Q3J+Z8829Ja1Mlix9cyG8wJI= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.14.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.14.0 h1:QHx6B/VUx3rZQqrQNZI5BfypbhhGSRzCz05viyJEQmM= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.14.0/go.mod h1:q4c7zbmdnIdSJNZuBsveTk5ZeRkSkS2g6b8zzFF1mE4= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0 h1:e4XT2+v4vgZBCbp5JUbe0Z+PRegh+nsLMp4X+esht9E= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 h1:0OFAPO964qsj6BzKs/hbAkpO/IIHp7vN1klKrohzULA= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0/go.mod h1:IDaKpBfDtw8eWBLtXR14HB5dsXDxS4VRUR0OL5rlRT8= github.com/DataDog/sketches-go v1.4.4 h1:dF52vzXRFSPOj2IjXSWLvXq3jubL4CI69kwYjJ1w5Z8= github.com/DataDog/sketches-go v1.4.4/go.mod h1:XR0ns2RtEEF09mDKXiKZiQg+nfZStrq1ZuL1eezeZe0= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= diff --git a/pkg/metrics/metricsource.go b/pkg/metrics/metricsource.go index f5cecdcd591f9..9868f5925d086 100644 --- a/pkg/metrics/metricsource.go +++ b/pkg/metrics/metricsource.go @@ -215,7 +215,6 @@ const ( MetricSourceKubeAPIserverMetrics MetricSourceKubeControllerManager MetricSourceKubeDNS - MetricSourceKubeflow MetricSourceKubeMetricsServer MetricSourceKubeProxy MetricSourceKubeScheduler @@ -292,6 +291,12 @@ const ( MetricSourceYarn MetricSourceZk MetricSourceAwsNeuron + MetricSourceTibcoEMS + MetricSourceSlurm + MetricSourceKyverno + MetricSourceKubeflow + MetricSourceAppgateSDP + MetricSourceAnyscale ) // String returns a string representation of MetricSource @@ -1051,8 +1056,6 @@ func CheckNameToMetricSource(name string) MetricSource { return MetricSourceKubeProxy case "kube_scheduler": return MetricSourceKubeScheduler - case "kubeflow": - return MetricSourceKubeflow case "kubelet": return MetricSourceKubelet case "kubernetes_state": @@ -1371,6 +1374,16 @@ func CheckNameToMetricSource(name string) MetricSource { return MetricSourceZenohRouter case "aws_neuron": return MetricSourceAwsNeuron + case "kyverno": + return MetricSourceKyverno + case "anyscale": + return MetricSourceAnyscale + case "appgate_sdp": + return MetricSourceAppgateSDP + case "slurm": + return MetricSourceSlurm + case "tibco_ems": + return MetricSourceTibcoEMS default: return MetricSourceUnknown } diff --git a/pkg/network/config/config.go b/pkg/network/config/config.go index 06dc994e0da48..1b8ef55fb4df8 100644 --- a/pkg/network/config/config.go +++ b/pkg/network/config/config.go 
@@ -14,7 +14,7 @@ import ( "github.com/cilium/ebpf/features" sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -310,10 +310,6 @@ type Config struct { // EnableUSMEventStream enables USM to use the event stream instead // of netlink for receiving process events. EnableUSMEventStream bool - - // BypassEnabled is used in tests only. - // It enables a ebpf-manager feature to bypass programs on-demand for controlled visibility. - BypassEnabled bool } func join(pieces ...string) string { @@ -322,7 +318,7 @@ func join(pieces ...string) string { // New creates a config for the network tracer func New() *Config { - cfg := ddconfig.SystemProbe() + cfg := pkgconfigsetup.SystemProbe() sysconfig.Adjust(cfg) c := &Config{ diff --git a/pkg/network/config/config_bpf_linux_test.go b/pkg/network/config/config_bpf_linux_test.go index e37e18136da70..a3b380c89fb2a 100644 --- a/pkg/network/config/config_bpf_linux_test.go +++ b/pkg/network/config/config_bpf_linux_test.go @@ -14,11 +14,8 @@ import ( "github.com/stretchr/testify/require" sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" - "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - wmmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/eventmonitor" emconfig "github.com/DataDog/datadog-agent/pkg/eventmonitor/config" @@ -39,11 +36,7 @@ func TestEventStreamEnabledForSupportedKernelsLinux(t *testing.T) { opts := eventmonitor.Opts{} telemetry := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) - wmeta := fxutil.Test[workloadmeta.Component](t, - core.MockBundle(), - wmmock.MockModule(workloadmeta.NewParams()), - ) - evm, err := eventmonitor.NewEventMonitor(emconfig, secconfig, opts, wmeta, telemetry) + evm, err := eventmonitor.NewEventMonitor(emconfig, secconfig, opts, telemetry) require.NoError(t, err) require.NoError(t, evm.Init()) } else { diff --git a/pkg/network/config/replace_rules.go b/pkg/network/config/replace_rules.go index 093ba772a0f35..ce8e1654766f0 100644 --- a/pkg/network/config/replace_rules.go +++ b/pkg/network/config/replace_rules.go @@ -10,7 +10,8 @@ import ( "fmt" "regexp" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // ReplaceRule specifies a replace rule. 
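Editor's note: the config hunks above (and several further down) replace the deprecated pkg/config facade: helpers now accept the interface type from pkg/config/model while call sites fetch the global via pkgconfigsetup.SystemProbe(). A rough, self-contained sketch of that shape, using hypothetical names (Config, systemProbe, parseReplaceRule are stand-ins for illustration, not the agent's API):

    package main

    import (
        "fmt"
        "regexp"
    )

    // Config is a hypothetical narrow interface accepted by helpers
    // (the real code uses model.Config from pkg/config/model).
    type Config interface {
        IsSet(key string) bool
        GetString(key string) string
    }

    type mapConfig map[string]string

    func (m mapConfig) IsSet(key string) bool       { _, ok := m[key]; return ok }
    func (m mapConfig) GetString(key string) string { return m[key] }

    // systemProbe plays the role of the global accessor (pkgconfigsetup.SystemProbe()).
    var systemProbe Config = mapConfig{
        "network_config.http_replace_rules": `\d+`,
    }

    // parseReplaceRule compiles a single replace pattern if the key is set.
    func parseReplaceRule(cfg Config, key string) (*regexp.Regexp, error) {
        if !cfg.IsSet(key) {
            return nil, nil
        }
        return regexp.Compile(cfg.GetString(key))
    }

    func main() {
        re, err := parseReplaceRule(systemProbe, "network_config.http_replace_rules")
        fmt.Println(re, err) // \d+ <nil>
    }

Accepting the narrow interface is what lets tests substitute a mock config, which appears to be what the configmock.NewSystemProbe(t) calls elsewhere in this patch rely on.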
@@ -25,8 +26,8 @@ type ReplaceRule struct { Repl string `mapstructure:"repl"` } -func parseReplaceRules(cfg ddconfig.Config, key string) ([]*ReplaceRule, error) { - if !ddconfig.SystemProbe().IsSet(key) { +func parseReplaceRules(cfg model.Config, key string) ([]*ReplaceRule, error) { + if !pkgconfigsetup.SystemProbe().IsSet(key) { return nil, nil } diff --git a/pkg/network/dns/cache_test.go b/pkg/network/dns/cache_test.go index bb025deec57e8..b56e0359b6591 100644 --- a/pkg/network/dns/cache_test.go +++ b/pkg/network/dns/cache_test.go @@ -11,6 +11,7 @@ import ( cryptorand "crypto/rand" "fmt" "math/rand" + "net/netip" "strings" "testing" "time" @@ -329,7 +330,7 @@ func randomAddressGen() func() util.Address { continue } - return util.V4AddressFromBytes(b) + return util.Address{Addr: netip.AddrFrom4([4]byte(b))} } } } diff --git a/pkg/network/driver/types_windows.go b/pkg/network/driver/types_windows.go index 184464cd0965f..1dbeea0cb9197 100644 --- a/pkg/network/driver/types_windows.go +++ b/pkg/network/driver/types_windows.go @@ -114,8 +114,8 @@ type PerFlowData struct { AddressFamily uint16 Protocol uint16 Flags uint32 - LocalAddress [16]uint8 - RemoteAddress [16]uint8 + LocalAddress [16]byte + RemoteAddress [16]byte PacketsOut uint64 MonotonicSentBytes uint64 TransportBytesOut uint64 diff --git a/pkg/network/ebpf/c/co-re/tracer-fentry.c b/pkg/network/ebpf/c/co-re/tracer-fentry.c index 857d2f922cde6..09259e948047d 100644 --- a/pkg/network/ebpf/c/co-re/tracer-fentry.c +++ b/pkg/network/ebpf/c/co-re/tracer-fentry.c @@ -201,7 +201,7 @@ int BPF_PROG(udp_sendpage_exit, struct sock *sk, struct page *page, int offset, return 0; } - return handle_message(&t, sent, 0, CONN_DIRECTION_UNKNOWN, 0, 0, PACKET_COUNT_NONE, sk); + return handle_message(&t, sent, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_INCREMENT, sk); } SEC("fexit/tcp_recvmsg") @@ -232,9 +232,6 @@ int BPF_PROG(tcp_close, struct sock *sk, long timeout) { conn_tuple_t t = {}; u64 pid_tgid = bpf_get_current_pid_tgid(); - // Should actually delete something only if the connection never got established - bpf_map_delete_elem(&tcp_ongoing_connect_pid, &sk); - // Get network namespace id log_debug("fentry/tcp_close: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_TCP)) { @@ -242,6 +239,11 @@ int BPF_PROG(tcp_close, struct sock *sk, long timeout) { } log_debug("fentry/tcp_close: netns: %u, sport: %u, dport: %u", t.netns, t.sport, t.dport); + skp_conn_tuple_t skp_conn = {.sk = sk, .tup = t}; + skp_conn.tup.pid = 0; + + bpf_map_delete_elem(&tcp_ongoing_connect_pid, &skp_conn); + cleanup_conn(ctx, &t, sk); return 0; } @@ -262,7 +264,7 @@ static __always_inline int handle_udp_send(struct sock *sk, int sent) { if (sent > 0) { log_debug("udp_sendmsg: sent: %d", sent); - handle_message(t, sent, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_NONE, sk); + handle_message(t, sent, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_INCREMENT, sk); } bpf_map_delete_elem(&udp_send_skb_args, &pid_tgid); @@ -450,7 +452,15 @@ int BPF_PROG(tcp_connect, struct sock *sk) { u64 pid_tgid = bpf_get_current_pid_tgid(); log_debug("fentry/tcp_connect: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); - bpf_map_update_with_telemetry(tcp_ongoing_connect_pid, &sk, &pid_tgid, BPF_ANY); + conn_tuple_t t = {}; + if (!read_conn_tuple(&t, sk, 0, CONN_TYPE_TCP)) { + increment_telemetry_count(tcp_connect_failed_tuple); + return 0; + } + + skp_conn_tuple_t skp_conn = {.sk = sk, .tup = t}; + pid_ts_t pid_ts = 
{.pid_tgid = pid_tgid, .timestamp = bpf_ktime_get_ns()}; + bpf_map_update_with_telemetry(tcp_ongoing_connect_pid, &skp_conn, &pid_ts, BPF_ANY); return 0; } @@ -458,19 +468,19 @@ int BPF_PROG(tcp_connect, struct sock *sk) { SEC("fentry/tcp_finish_connect") int BPF_PROG(tcp_finish_connect, struct sock *sk, struct sk_buff *skb, int rc) { RETURN_IF_NOT_IN_SYSPROBE_TASK("fentry/tcp_finish_connect"); - u64 *pid_tgid_p = bpf_map_lookup_elem(&tcp_ongoing_connect_pid, &sk); - if (!pid_tgid_p) { + conn_tuple_t t = {}; + if (!read_conn_tuple(&t, sk, 0, CONN_TYPE_TCP)) { + increment_telemetry_count(tcp_finish_connect_failed_tuple); return 0; } - - u64 pid_tgid = *pid_tgid_p; - bpf_map_delete_elem(&tcp_ongoing_connect_pid, &sk); - log_debug("fentry/tcp_finish_connect: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); - - conn_tuple_t t = {}; - if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_TCP)) { + skp_conn_tuple_t skp_conn = {.sk = sk, .tup = t}; + pid_ts_t *pid_tgid_p = bpf_map_lookup_elem(&tcp_ongoing_connect_pid, &skp_conn); + if (!pid_tgid_p) { return 0; } + u64 pid_tgid = pid_tgid_p->pid_tgid; + t.pid = pid_tgid >> 32; + log_debug("fentry/tcp_finish_connect: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); handle_tcp_stats(&t, sk, TCP_ESTABLISHED); handle_message(&t, 0, 0, CONN_DIRECTION_OUTGOING, 0, 0, PACKET_COUNT_NONE, sk); @@ -501,6 +511,10 @@ int BPF_PROG(inet_csk_accept_exit, struct sock *_sk, int flags, int *err, bool k pb.netns = t.netns; pb.port = t.sport; add_port_bind(&pb, port_bindings); + + skp_conn_tuple_t skp_conn = {.sk = sk, .tup = t}; + pid_ts_t pid_ts = {.pid_tgid = pid_tgid, .timestamp = bpf_ktime_get_ns()}; + bpf_map_update_with_telemetry(tcp_ongoing_connect_pid, &skp_conn, &pid_ts, BPF_ANY); log_debug("fexit/inet_csk_accept: netns: %u, sport: %u, dport: %u", t.netns, t.sport, t.dport); return 0; } diff --git a/pkg/network/ebpf/c/protocols/classification/defs.h b/pkg/network/ebpf/c/protocols/classification/defs.h index d44d2adb8d8ce..823112a4fb7e1 100644 --- a/pkg/network/ebpf/c/protocols/classification/defs.h +++ b/pkg/network/ebpf/c/protocols/classification/defs.h @@ -143,10 +143,10 @@ typedef enum { PROG_HTTP2_EOS_PARSER, PROG_HTTP2_TERMINATION, PROG_KAFKA, - PROG_KAFKA_RESPONSE_PARTITION_PARSER_V0, - PROG_KAFKA_RESPONSE_PARTITION_PARSER_V12, - PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V0, - PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V12, + PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V0, + PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V12, + PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V0, + PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V12, PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V0, PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V9, PROG_KAFKA_TERMINATION, diff --git a/pkg/network/ebpf/c/protocols/kafka/kafka-parsing.h b/pkg/network/ebpf/c/protocols/kafka/kafka-parsing.h index f20905e92e041..e55ab7954124d 100644 --- a/pkg/network/ebpf/c/protocols/kafka/kafka-parsing.h +++ b/pkg/network/ebpf/c/protocols/kafka/kafka-parsing.h @@ -1114,9 +1114,9 @@ static __always_inline void kafka_call_response_parser(void *ctx, conn_tuple_t * switch (level) { case PARSER_LEVEL_RECORD_BATCH: if (api_version >= 12) { - index = PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V12; + index = PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V12; } else { - index = PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V0; + index = PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V0; } break; case PARSER_LEVEL_PARTITION: @@ -1124,9 +1124,9 @@ static __always_inline void 
kafka_call_response_parser(void *ctx, conn_tuple_t * switch (api_key) { case KAFKA_FETCH: if (api_version >= 12) { - index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V12; + index = PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V12; } else { - index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V0; + index = PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V0; } break; case KAFKA_PRODUCE: @@ -1147,9 +1147,9 @@ static __always_inline void kafka_call_response_parser(void *ctx, conn_tuple_t * switch (level) { case PARSER_LEVEL_RECORD_BATCH: if (api_version >= 12) { - index = PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V12; + index = PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V12; } else { - index = PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V0; + index = PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V0; } break; case PARSER_LEVEL_PARTITION: @@ -1157,9 +1157,9 @@ static __always_inline void kafka_call_response_parser(void *ctx, conn_tuple_t * switch (api_key) { case KAFKA_FETCH: if (api_version >= 12) { - index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V12; + index = PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V12; } else { - index = PROG_KAFKA_RESPONSE_PARTITION_PARSER_V0; + index = PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V0; } break; case KAFKA_PRODUCE: @@ -1410,23 +1410,23 @@ static __always_inline int __socket__kafka_response_parser(struct __sk_buff *skb return 0; } -SEC("socket/kafka_response_partition_parser_v0") -int socket__kafka_response_partition_parser_v0(struct __sk_buff *skb) { +SEC("socket/kafka_fetch_response_partition_parser_v0") +int socket__kafka_fetch_response_partition_parser_v0(struct __sk_buff *skb) { return __socket__kafka_response_parser(skb, PARSER_LEVEL_PARTITION, 0, 11, KAFKA_FETCH); } -SEC("socket/kafka_response_partition_parser_v12") -int socket__kafka_response_partition_parser_v12(struct __sk_buff *skb) { +SEC("socket/kafka_fetch_response_partition_parser_v12") +int socket__kafka_fetch_response_partition_parser_v12(struct __sk_buff *skb) { return __socket__kafka_response_parser(skb, PARSER_LEVEL_PARTITION, 12, 12, KAFKA_FETCH); } -SEC("socket/kafka_response_record_batch_parser_v0") -int socket__kafka_response_record_batch_parser_v0(struct __sk_buff *skb) { +SEC("socket/kafka_fetch_response_record_batch_parser_v0") +int socket__kafka_fetch_response_record_batch_parser_v0(struct __sk_buff *skb) { return __socket__kafka_response_parser(skb, PARSER_LEVEL_RECORD_BATCH, 0, 11, KAFKA_FETCH); } -SEC("socket/kafka_response_record_batch_parser_v12") -int socket__kafka_response_record_batch_parser_v12(struct __sk_buff *skb) { +SEC("socket/kafka_fetch_response_record_batch_parser_v12") +int socket__kafka_fetch_response_record_batch_parser_v12(struct __sk_buff *skb) { return __socket__kafka_response_parser(skb, PARSER_LEVEL_RECORD_BATCH, 12, 12, KAFKA_FETCH); } @@ -1460,23 +1460,23 @@ static __always_inline int __uprobe__kafka_tls_response_parser(struct pt_regs *c return 0; } -SEC("uprobe/kafka_tls_response_partition_parser_v0") -int uprobe__kafka_tls_response_partition_parser_v0(struct pt_regs *ctx) { +SEC("uprobe/kafka_tls_fetch_response_partition_parser_v0") +int uprobe__kafka_tls_fetch_response_partition_parser_v0(struct pt_regs *ctx) { return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_PARTITION, 0, 11, KAFKA_FETCH); } -SEC("uprobe/kafka_tls_response_partition_parser_v12") -int uprobe__kafka_tls_response_partition_parser_v12(struct pt_regs *ctx) { +SEC("uprobe/kafka_tls_fetch_response_partition_parser_v12") +int uprobe__kafka_tls_fetch_response_partition_parser_v12(struct pt_regs *ctx) { 
return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_PARTITION, 12, 12, KAFKA_FETCH); } -SEC("uprobe/kafka_tls_response_record_batch_parser_v0") -int uprobe__kafka_tls_response_record_batch_parser_v0(struct pt_regs *ctx) { +SEC("uprobe/kafka_tls_fetch_response_record_batch_parser_v0") +int uprobe__kafka_tls_fetch_response_record_batch_parser_v0(struct pt_regs *ctx) { return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_RECORD_BATCH, 0, 11, KAFKA_FETCH); } -SEC("uprobe/kafka_tls_response_record_batch_parser_v12") -int uprobe__kafka_tls_response_record_batch_parser_v12(struct pt_regs *ctx) { +SEC("uprobe/kafka_tls_fetch_response_record_batch_parser_v12") +int uprobe__kafka_tls_fetch_response_record_batch_parser_v12(struct pt_regs *ctx) { return __uprobe__kafka_tls_response_parser(ctx, PARSER_LEVEL_RECORD_BATCH, 12, 12, KAFKA_FETCH); } diff --git a/pkg/network/ebpf/c/protocols/tls/go-tls-maps.h b/pkg/network/ebpf/c/protocols/tls/go-tls-maps.h index 58f3ce3ac771f..1c66dee1d0abc 100644 --- a/pkg/network/ebpf/c/protocols/tls/go-tls-maps.h +++ b/pkg/network/ebpf/c/protocols/tls/go-tls-maps.h @@ -9,10 +9,6 @@ // offsets_data map contains the information about the locations of structs in the inspected binary, mapped by the binary's inode number. BPF_HASH_MAP(offsets_data, go_tls_offsets_data_key_t, tls_offsets_data_t, 1024) -// Maps PID to the - tuple, that is used to find the offsets_data map for the binary. -// Size is a 10 times the size of the offsets_data map, to have enough space for all the binaries. -BPF_HASH_MAP(pid_to_device_inode, u32, go_tls_offsets_data_key_t, 10240) - /* go_tls_read_args is used to get the read function info when running in the read-return uprobe. The key contains the go routine id and the pid. */ BPF_LRU_MAP(go_tls_read_args, go_tls_function_args_key_t, go_tls_read_args_data_t, 2048) diff --git a/pkg/network/ebpf/c/protocols/tls/https.h b/pkg/network/ebpf/c/protocols/tls/https.h index ccb9550f3602c..db485ab14403c 100644 --- a/pkg/network/ebpf/c/protocols/tls/https.h +++ b/pkg/network/ebpf/c/protocols/tls/https.h @@ -3,6 +3,10 @@ #ifdef COMPILE_CORE #include "ktypes.h" +#define MINORBITS 20 +#define MINORMASK ((1U << MINORBITS) - 1) +#define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS)) +#define MINOR(dev) ((unsigned int) ((dev) & MINORMASK)) #else #include #include @@ -283,19 +287,41 @@ static __always_inline void map_ssl_ctx_to_sock(struct sock *skp) { bpf_map_update_with_telemetry(ssl_sock_by_ctx, &ssl_ctx, &ssl_sock, BPF_ANY); } - -// Retrieves the result of binary analysis for the current task binary's inode number. -// For the current PID, we retrieve the inode number of the binary and then we look up the binary's analysis result. +/** + * get_offsets_data retrieves the result of binary analysis for the + * current task binary's inode number. 
+ */ static __always_inline tls_offsets_data_t* get_offsets_data() { - u64 pid_tgid = bpf_get_current_pid_tgid(); - u32 pid = pid_tgid >> 32; - go_tls_offsets_data_key_t *key = bpf_map_lookup_elem(&pid_to_device_inode, &pid); - if (key == NULL) { - log_debug("get_offsets_data: could not find key for pid %u", pid); + struct task_struct *t = (struct task_struct *) bpf_get_current_task(); + struct inode *inode; + go_tls_offsets_data_key_t key; + dev_t dev_id; + + inode = BPF_CORE_READ(t, mm, exe_file, f_inode); + if (!inode) { + log_debug("get_offsets_data: could not read f_inode field"); + return NULL; + } + + int err; + err = BPF_CORE_READ_INTO(&key.ino, inode, i_ino); + if (err) { + log_debug("get_offsets_data: could not read i_ino field"); return NULL; } - go_tls_offsets_data_key_t key_copy = *key; - return bpf_map_lookup_elem(&offsets_data, &key_copy); + + err = BPF_CORE_READ_INTO(&dev_id, inode, i_sb, s_dev); + if (err) { + log_debug("get_offsets_data: could not read s_dev field"); + return NULL; + } + + key.device_id_major = MAJOR(dev_id); + key.device_id_minor = MINOR(dev_id); + + log_debug("get_offsets_data: task binary inode number: %llu; device ID %x:%x", key.ino, key.device_id_major, key.device_id_minor); + + return bpf_map_lookup_elem(&offsets_data, &key); } #endif diff --git a/pkg/network/ebpf/c/tracer.c b/pkg/network/ebpf/c/tracer.c index 66482fb92a092..7555758f40524 100644 --- a/pkg/network/ebpf/c/tracer.c +++ b/pkg/network/ebpf/c/tracer.c @@ -187,48 +187,48 @@ int BPF_BYPASSABLE_KRETPROBE(kretprobe__udp_sendpage, int sent) { return 0; } - return handle_message(&t, sent, 0, CONN_DIRECTION_UNKNOWN, 0, 0, PACKET_COUNT_NONE, skp); + return handle_message(&t, sent, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_INCREMENT, skp); } SEC("kprobe/tcp_done") int BPF_BYPASSABLE_KPROBE(kprobe__tcp_done, struct sock *sk) { conn_tuple_t t = {}; - u64 pid_tgid = bpf_get_current_pid_tgid(); - __u64 *failed_conn_pid = NULL; + + if (!read_conn_tuple(&t, sk, 0, CONN_TYPE_TCP)) { + increment_telemetry_count(tcp_done_failed_tuple); + return 0; + } + log_debug("kprobe/tcp_done: netns: %u, sport: %u, dport: %u", t.netns, t.sport, t.dport); + skp_conn_tuple_t skp_conn = {.sk = sk, .tup = t}; if (!tcp_failed_connections_enabled()) { + bpf_map_delete_elem(&tcp_ongoing_connect_pid, &skp_conn); return 0; } int err = 0; bpf_probe_read_kernel_with_telemetry(&err, sizeof(err), (&sk->sk_err)); if (err == 0) { + bpf_map_delete_elem(&tcp_ongoing_connect_pid, &skp_conn); return 0; // no failure } if (err != TCP_CONN_FAILED_RESET && err != TCP_CONN_FAILED_TIMEOUT && err != TCP_CONN_FAILED_REFUSED) { log_debug("kprobe/tcp_done: unsupported error code: %d", err); increment_telemetry_count(unsupported_tcp_failures); + bpf_map_delete_elem(&tcp_ongoing_connect_pid, &skp_conn); return 0; } - log_debug("kprobe/tcp_done: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); - if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_TCP)) { - return 0; - } - log_debug("kprobe/tcp_done: netns: %u, sport: %u, dport: %u", t.netns, t.sport, t.dport); - - // connection timeouts will have 0 pids as they are cleaned up by an idle process. - // get the pid from the ongoing failure map in this case, as it should have been set in connect(). - failed_conn_pid = bpf_map_lookup_elem(&tcp_ongoing_connect_pid, &sk); + // connection timeouts will have 0 pids as they are cleaned up by an idle process. 
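Editor's note: the go-tls hunk above drops the pid_to_device_inode map and instead keys offsets_data by the binary's (device major, device minor, inode), read from the task's exe_file at probe time; the new MAJOR/MINOR macros split the kernel's internal dev_t using 20 minor bits. A small sketch of that split, with an arbitrary sample value:

    package main

    import "fmt"

    // Mirrors the MAJOR/MINOR macros from the CO-RE https.h hunk: the kernel's
    // internal dev_t layout keeps the minor number in the low 20 bits.
    const (
        minorBits = 20
        minorMask = (1 << minorBits) - 1
    )

    func major(dev uint32) uint32 { return dev >> minorBits }
    func minor(dev uint32) uint32 { return dev & minorMask }

    func main() {
        dev := uint32(0x800012) // illustrative device number, not taken from the patch
        fmt.Printf("dev %#x -> major %d, minor %d\n", dev, major(dev), minor(dev))
        // Keying the offsets map by (major, minor, inode) identifies the binary
        // itself, so any process running it resolves the same Go TLS offsets.
    }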
+ // resets can also have kernel pids are they are triggered by receiving an RST packet from the server + // get the pid from the ongoing failure map in this case, as it should have been set in connect(). else bail + pid_ts_t *failed_conn_pid = bpf_map_lookup_elem(&tcp_ongoing_connect_pid, &skp_conn); if (failed_conn_pid) { - if (*failed_conn_pid != pid_tgid) { - increment_telemetry_count(tcp_done_pid_mismatch); - } - bpf_probe_read_kernel_with_telemetry(&pid_tgid, sizeof(pid_tgid), failed_conn_pid); - t.pid = pid_tgid >> 32; - } - if (t.pid == 0) { + bpf_map_delete_elem(&tcp_ongoing_connect_pid, &skp_conn); + t.pid = failed_conn_pid->pid_tgid >> 32; + } else { + increment_telemetry_count(tcp_done_missing_pid); return 0; } @@ -238,13 +238,12 @@ int BPF_BYPASSABLE_KPROBE(kprobe__tcp_done, struct sock *sk) { __u64 timestamp = bpf_ktime_get_ns(); if (bpf_map_update_with_telemetry(conn_close_flushed, &t, ×tamp, BPF_NOEXIST, -EEXIST) == 0) { cleanup_conn(ctx, &t, sk); + flush_tcp_failure(ctx, &t, err); } else { bpf_map_delete_elem(&conn_close_flushed, &t); increment_telemetry_count(double_flush_attempts_done); } - flush_tcp_failure(ctx, &t, err); - return 0; } @@ -259,11 +258,6 @@ int BPF_BYPASSABLE_KPROBE(kprobe__tcp_close, struct sock *sk) { conn_tuple_t t = {}; u64 pid_tgid = bpf_get_current_pid_tgid(); - // increment telemetry for connections that were never established - if (bpf_map_delete_elem(&tcp_ongoing_connect_pid, &sk) == 0) { - increment_telemetry_count(tcp_failed_connect); - } - // Get network namespace id log_debug("kprobe/tcp_close: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_TCP)) { @@ -277,12 +271,28 @@ int BPF_BYPASSABLE_KPROBE(kprobe__tcp_close, struct sock *sk) { bpf_map_update_with_telemetry(tcp_close_args, &pid_tgid, &t, BPF_ANY); } + skp_conn_tuple_t skp_conn = {.sk = sk, .tup = t}; + skp_conn.tup.pid = 0; + + bpf_map_delete_elem(&tcp_ongoing_connect_pid, &skp_conn); + + if (!tcp_failed_connections_enabled()) { + cleanup_conn(ctx, &t, sk); + return 0; + } + // check if this connection was already flushed and ensure we don't flush again // upsert the timestamp to the map and delete if it already exists, flush connection otherwise // skip EEXIST errors for telemetry since it is an expected error __u64 timestamp = bpf_ktime_get_ns(); - if (!tcp_failed_connections_enabled() || (bpf_map_update_with_telemetry(conn_close_flushed, &t, ×tamp, BPF_NOEXIST, -EEXIST) == 0)) { + if (bpf_map_update_with_telemetry(conn_close_flushed, &t, ×tamp, BPF_NOEXIST, -EEXIST) == 0) { cleanup_conn(ctx, &t, sk); + int err = 0; + bpf_probe_read_kernel_with_telemetry(&err, sizeof(err), (&sk->sk_err)); + if (err == TCP_CONN_FAILED_RESET || err == TCP_CONN_FAILED_TIMEOUT || err == TCP_CONN_FAILED_REFUSED) { + increment_telemetry_count(tcp_close_target_failures); + flush_tcp_failure(ctx, &t, err); + } } else { bpf_map_delete_elem(&conn_close_flushed, &t); increment_telemetry_count(double_flush_attempts_close); @@ -420,7 +430,7 @@ static __always_inline int handle_ip6_skb(struct sock *sk, size_t size, struct f } log_debug("kprobe/ip6_make_skb: pid_tgid: %llu, size: %zu", pid_tgid, size); - handle_message(&t, size, 0, CONN_DIRECTION_UNKNOWN, 0, 0, PACKET_COUNT_NONE, sk); + handle_message(&t, size, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_INCREMENT, sk); increment_telemetry_count(udp_send_processed); return 0; @@ -597,9 +607,7 @@ static __always_inline int handle_ip_skb(struct sock *sk, size_t size, struct fl 
log_debug("kprobe/ip_make_skb: pid_tgid: %llu, size: %zu", pid_tgid, size); - // segment count is not currently enabled on prebuilt. - // to enable, change PACKET_COUNT_NONE => PACKET_COUNT_INCREMENT - handle_message(&t, size, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_NONE, sk); + handle_message(&t, size, 0, CONN_DIRECTION_UNKNOWN, 1, 0, PACKET_COUNT_INCREMENT, sk); increment_telemetry_count(udp_send_processed); return 0; @@ -767,9 +775,7 @@ static __always_inline int handle_ret_udp_recvmsg_pre_4_7_0(int copied, void *ud bpf_map_delete_elem(udp_sock_map, &pid_tgid); log_debug("kretprobe/udp_recvmsg: pid_tgid: %llu, return: %d", pid_tgid, copied); - // segment count is not currently enabled on prebuilt. - // to enable, change PACKET_COUNT_NONE => PACKET_COUNT_INCREMENT - handle_message(&t, 0, copied, CONN_DIRECTION_UNKNOWN, 0, 1, PACKET_COUNT_NONE, st->sk); + handle_message(&t, 0, copied, CONN_DIRECTION_UNKNOWN, 0, 1, PACKET_COUNT_INCREMENT, st->sk); return 0; } @@ -946,27 +952,36 @@ int BPF_BYPASSABLE_KPROBE(kprobe__tcp_connect, struct sock *skp) { u64 pid_tgid = bpf_get_current_pid_tgid(); log_debug("kprobe/tcp_connect: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); - bpf_map_update_with_telemetry(tcp_ongoing_connect_pid, &skp, &pid_tgid, BPF_ANY); + conn_tuple_t t = {}; + if (!read_conn_tuple(&t, skp, 0, CONN_TYPE_TCP)) { + increment_telemetry_count(tcp_connect_failed_tuple); + return 0; + } + + skp_conn_tuple_t skp_conn = {.sk = skp, .tup = t}; + pid_ts_t pid_ts = {.pid_tgid = pid_tgid, .timestamp = bpf_ktime_get_ns()}; + bpf_map_update_with_telemetry(tcp_ongoing_connect_pid, &skp_conn, &pid_ts, BPF_ANY); return 0; } SEC("kprobe/tcp_finish_connect") int BPF_BYPASSABLE_KPROBE(kprobe__tcp_finish_connect, struct sock *skp) { - u64 *pid_tgid_p = bpf_map_lookup_elem(&tcp_ongoing_connect_pid, &skp); + conn_tuple_t t = {}; + if (!read_conn_tuple(&t, skp, 0, CONN_TYPE_TCP)) { + increment_telemetry_count(tcp_finish_connect_failed_tuple); + return 0; + } + skp_conn_tuple_t skp_conn = {.sk = skp, .tup = t}; + pid_ts_t *pid_tgid_p = bpf_map_lookup_elem(&tcp_ongoing_connect_pid, &skp_conn); if (!pid_tgid_p) { return 0; } - u64 pid_tgid = *pid_tgid_p; - bpf_map_delete_elem(&tcp_ongoing_connect_pid, &skp); + u64 pid_tgid = pid_tgid_p->pid_tgid; + t.pid = pid_tgid >> 32; log_debug("kprobe/tcp_finish_connect: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); - conn_tuple_t t = {}; - if (!read_conn_tuple(&t, skp, pid_tgid, CONN_TYPE_TCP)) { - return 0; - } - handle_tcp_stats(&t, skp, TCP_ESTABLISHED); handle_message(&t, 0, 0, CONN_DIRECTION_OUTGOING, 0, 0, PACKET_COUNT_NONE, skp); @@ -988,6 +1003,8 @@ int BPF_BYPASSABLE_KRETPROBE(kretprobe__inet_csk_accept, struct sock *sk) { if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_TCP)) { return 0; } + log_debug("kretprobe/inet_csk_accept: netns: %u, sport: %u, dport: %u", t.netns, t.sport, t.dport); + handle_tcp_stats(&t, sk, TCP_ESTABLISHED); handle_message(&t, 0, 0, CONN_DIRECTION_INCOMING, 0, 0, PACKET_COUNT_NONE, sk); @@ -996,7 +1013,11 @@ int BPF_BYPASSABLE_KRETPROBE(kretprobe__inet_csk_accept, struct sock *sk) { pb.port = t.sport; add_port_bind(&pb, port_bindings); - log_debug("kretprobe/inet_csk_accept: netns: %u, sport: %u, dport: %u", t.netns, t.sport, t.dport); + skp_conn_tuple_t skp_conn = {.sk = sk, .tup = t}; + skp_conn.tup.pid = 0; + pid_ts_t pid_ts = {.pid_tgid = pid_tgid, .timestamp = bpf_ktime_get_ns()}; + bpf_map_update_with_telemetry(tcp_ongoing_connect_pid, &skp_conn, &pid_ts, BPF_ANY); + return 0; } diff 
--git a/pkg/network/ebpf/c/tracer/events.h b/pkg/network/ebpf/c/tracer/events.h index 051e5821c3c00..5e0650faa862f 100644 --- a/pkg/network/ebpf/c/tracer/events.h +++ b/pkg/network/ebpf/c/tracer/events.h @@ -151,6 +151,7 @@ static __always_inline void flush_tcp_failure(void *ctx, conn_tuple_t *tup, int u32 cpu = bpf_get_smp_processor_id(); bpf_perf_event_output(ctx, &conn_fail_event, cpu, &failure, sizeof(conn_failed_t)); } + increment_telemetry_count(tcp_failed_connect); } static __always_inline void flush_conn_close_if_full(void *ctx) { diff --git a/pkg/network/ebpf/c/tracer/maps.h b/pkg/network/ebpf/c/tracer/maps.h index bf00f93b9e7a3..f02066fe4b4dc 100644 --- a/pkg/network/ebpf/c/tracer/maps.h +++ b/pkg/network/ebpf/c/tracer/maps.h @@ -23,8 +23,8 @@ BPF_HASH_MAP(tcp_stats, conn_tuple_t, tcp_stats_t, 0) */ BPF_HASH_MAP(tcp_retransmits, conn_tuple_t, __u32, 0) -/* Will hold the PIDs initiating TCP connections */ -BPF_HASH_MAP(tcp_ongoing_connect_pid, struct sock *, __u64, 1024) +/* Will hold the PIDs initiating TCP connections keyed by socket + tuple. PIDs have a timestamp attached so they can age out */ +BPF_HASH_MAP(tcp_ongoing_connect_pid, skp_conn_tuple_t, pid_ts_t, 0) /* Will hold a flag to indicate that closed connections have already been flushed */ BPF_HASH_MAP(conn_close_flushed, conn_tuple_t, __u64, 8192) diff --git a/pkg/network/ebpf/c/tracer/telemetry.h b/pkg/network/ebpf/c/tracer/telemetry.h index 761088be75788..a6067f1fae501 100644 --- a/pkg/network/ebpf/c/tracer/telemetry.h +++ b/pkg/network/ebpf/c/tracer/telemetry.h @@ -24,7 +24,11 @@ enum telemetry_counter { double_flush_attempts_close, double_flush_attempts_done, unsupported_tcp_failures, - tcp_done_pid_mismatch, + tcp_done_missing_pid, + tcp_connect_failed_tuple, + tcp_done_failed_tuple, + tcp_finish_connect_failed_tuple, + tcp_close_target_failures, }; static __always_inline void increment_telemetry_count(enum telemetry_counter counter_name) { @@ -63,8 +67,20 @@ static __always_inline void increment_telemetry_count(enum telemetry_counter cou case unsupported_tcp_failures: __sync_fetch_and_add(&val->unsupported_tcp_failures, 1); break; - case tcp_done_pid_mismatch: - __sync_fetch_and_add(&val->tcp_done_pid_mismatch, 1); + case tcp_done_missing_pid: + __sync_fetch_and_add(&val->tcp_done_missing_pid, 1); + break; + case tcp_connect_failed_tuple: + __sync_fetch_and_add(&val->tcp_connect_failed_tuple, 1); + break; + case tcp_done_failed_tuple: + __sync_fetch_and_add(&val->tcp_done_failed_tuple, 1); + break; + case tcp_finish_connect_failed_tuple: + __sync_fetch_and_add(&val->tcp_finish_connect_failed_tuple, 1); + break; + case tcp_close_target_failures: + __sync_fetch_and_add(&val->tcp_close_target_failures, 1); break; } } diff --git a/pkg/network/ebpf/c/tracer/tracer.h b/pkg/network/ebpf/c/tracer/tracer.h index 018446b375397..cb2a96ff7a66c 100644 --- a/pkg/network/ebpf/c/tracer/tracer.h +++ b/pkg/network/ebpf/c/tracer/tracer.h @@ -114,7 +114,11 @@ typedef struct { __u64 double_flush_attempts_close; __u64 double_flush_attempts_done; __u64 unsupported_tcp_failures; - __u64 tcp_done_pid_mismatch; + __u64 tcp_done_missing_pid; + __u64 tcp_connect_failed_tuple; + __u64 tcp_done_failed_tuple; + __u64 tcp_finish_connect_failed_tuple; + __u64 tcp_close_target_failures; } telemetry_t; typedef struct { @@ -147,4 +151,14 @@ typedef struct { }; } ip_make_skb_args_t; +typedef struct { + struct sock *sk; + conn_tuple_t tup; +} skp_conn_tuple_t; + +typedef struct { + __u64 pid_tgid; + __u64 timestamp; +} pid_ts_t; + #endif diff --git 
a/pkg/network/ebpf/kprobe_types.go b/pkg/network/ebpf/kprobe_types.go index 4bc58cdfb864f..6745cdc0b7fee 100644 --- a/pkg/network/ebpf/kprobe_types.go +++ b/pkg/network/ebpf/kprobe_types.go @@ -21,6 +21,8 @@ type TCPStats C.tcp_stats_t type ConnStats C.conn_stats_ts_t type Conn C.conn_t type FailedConn C.conn_failed_t +type SkpConn C.skp_conn_tuple_t +type PidTs C.pid_ts_t type Batch C.batch_t type Telemetry C.telemetry_t type PortBinding C.port_binding_t diff --git a/pkg/network/ebpf/kprobe_types_linux.go b/pkg/network/ebpf/kprobe_types_linux.go index 63dd4b4c6e393..58cee5d1115aa 100644 --- a/pkg/network/ebpf/kprobe_types_linux.go +++ b/pkg/network/ebpf/kprobe_types_linux.go @@ -44,6 +44,14 @@ type FailedConn struct { Reason uint32 Pad_cgo_0 [4]byte } +type SkpConn struct { + Sk uint64 + Tup ConnTuple +} +type PidTs struct { + Tgid uint64 + Timestamp uint64 +} type Batch struct { C0 Conn C1 Conn @@ -55,17 +63,21 @@ type Batch struct { Pad_cgo_0 [2]byte } type Telemetry struct { - Tcp_failed_connect uint64 - Tcp_sent_miscounts uint64 - Unbatched_tcp_close uint64 - Unbatched_udp_close uint64 - Udp_sends_processed uint64 - Udp_sends_missed uint64 - Udp_dropped_conns uint64 - Double_flush_attempts_close uint64 - Double_flush_attempts_done uint64 - Unsupported_tcp_failures uint64 - Tcp_done_pid_mismatch uint64 + Tcp_failed_connect uint64 + Tcp_sent_miscounts uint64 + Unbatched_tcp_close uint64 + Unbatched_udp_close uint64 + Udp_sends_processed uint64 + Udp_sends_missed uint64 + Udp_dropped_conns uint64 + Double_flush_attempts_close uint64 + Double_flush_attempts_done uint64 + Unsupported_tcp_failures uint64 + Tcp_done_missing_pid uint64 + Tcp_connect_failed_tuple uint64 + Tcp_done_failed_tuple uint64 + Tcp_finish_connect_failed_tuple uint64 + Tcp_close_target_failures uint64 } type PortBinding struct { Netns uint32 @@ -77,12 +89,12 @@ type PIDFD struct { Fd uint32 } type UDPRecvSock struct { - Sk *_Ctype_struct_sock - Msg *_Ctype_struct_msghdr + Sk uint64 + Msg uint64 } type BindSyscallArgs struct { - Addr *_Ctype_struct_sockaddr - Sk *_Ctype_struct_sock + Addr uint64 + Sk uint64 } type ProtocolStack struct { Api uint8 diff --git a/pkg/network/ebpf/probes/probes.go b/pkg/network/ebpf/probes/probes.go index 1d455d9c6e364..8d8cfa5247542 100644 --- a/pkg/network/ebpf/probes/probes.go +++ b/pkg/network/ebpf/probes/probes.go @@ -192,8 +192,8 @@ const ( TCPStatsMap BPFMapName = "tcp_stats" // TCPRetransmitsMap is the map storing TCP retransmits TCPRetransmitsMap BPFMapName = "tcp_retransmits" - // TCPConnectSockPidMap is the map storing the PIDs of ongoing TCP connections - TCPConnectSockPidMap BPFMapName = "tcp_ongoing_connect_pid" + // TCPOngoingConnectPid is the map storing ongoing TCP connection PIDs by (socket + tuple) + TCPOngoingConnectPid BPFMapName = "tcp_ongoing_connect_pid" // ConnCloseFlushed is the map storing closed connections that were already flushed ConnCloseFlushed BPFMapName = "conn_close_flushed" // ConnCloseEventMap is the map storing connection close events diff --git a/pkg/network/encoding/encoding_test.go b/pkg/network/encoding/encoding_test.go index c23a02a8d3cc2..2e9806ea59297 100644 --- a/pkg/network/encoding/encoding_test.go +++ b/pkg/network/encoding/encoding_test.go @@ -19,8 +19,8 @@ import ( model "github.com/DataDog/agent-payload/v5/process" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/network" 
"github.com/DataDog/datadog-agent/pkg/network/dns" "github.com/DataDog/datadog-agent/pkg/network/encoding/marshal" @@ -324,7 +324,7 @@ func testSerialization(t *testing.T, aggregateByStatusCode bool) { t.Run("requesting application/json serialization (no query types)", func(t *testing.T) { configmock.NewSystemProbe(t) - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) out := getExpectedConnections(false, httpOutBlob) assert := assert.New(t) blobWriter := getBlobWriter(t, assert, in, "application/json") @@ -346,8 +346,8 @@ func testSerialization(t *testing.T, aggregateByStatusCode bool) { t.Run("requesting application/json serialization (with query types)", func(t *testing.T) { configmock.NewSystemProbe(t) - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) - config.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", true) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) + pkgconfigsetup.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", true) out := getExpectedConnections(true, httpOutBlob) assert := assert.New(t) @@ -370,7 +370,7 @@ func testSerialization(t *testing.T, aggregateByStatusCode bool) { t.Run("requesting empty serialization", func(t *testing.T) { configmock.NewSystemProbe(t) - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) out := getExpectedConnections(false, httpOutBlob) assert := assert.New(t) @@ -401,7 +401,7 @@ func testSerialization(t *testing.T, aggregateByStatusCode bool) { t.Run("requesting unsupported serialization format", func(t *testing.T) { configmock.NewSystemProbe(t) - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) out := getExpectedConnections(false, httpOutBlob) assert := assert.New(t) @@ -457,7 +457,7 @@ func testSerialization(t *testing.T, aggregateByStatusCode bool) { t.Run("requesting application/protobuf serialization (no query types)", func(t *testing.T) { configmock.NewSystemProbe(t) - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) out := getExpectedConnections(false, httpOutBlob) assert := assert.New(t) @@ -473,8 +473,8 @@ func testSerialization(t *testing.T, aggregateByStatusCode bool) { }) t.Run("requesting application/protobuf serialization (with query types)", func(t *testing.T) { configmock.NewSystemProbe(t) - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) - config.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", true) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) + pkgconfigsetup.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", true) out := getExpectedConnections(true, httpOutBlob) assert := assert.New(t) diff --git a/pkg/network/encoding/marshal/dns.go b/pkg/network/encoding/marshal/dns.go index 0cab4bae202fd..ef945306a656b 100644 --- a/pkg/network/encoding/marshal/dns.go +++ b/pkg/network/encoding/marshal/dns.go @@ -7,7 +7,8 @@ package 
marshal import ( model "github.com/DataDog/agent-payload/v5/process" - "github.com/DataDog/datadog-agent/pkg/config" + + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/dns" ) @@ -27,8 +28,8 @@ func newDNSFormatter(conns *network.Connections, ipc ipCache) *dnsFormatter { conns: conns, ipc: ipc, domainSet: make(map[string]int), - queryTypeEnabled: config.SystemProbe().GetBool("network_config.enable_dns_by_querytype"), - dnsDomainsEnabled: config.SystemProbe().GetBool("system_probe_config.collect_dns_domains"), + queryTypeEnabled: pkgconfigsetup.SystemProbe().GetBool("network_config.enable_dns_by_querytype"), + dnsDomainsEnabled: pkgconfigsetup.SystemProbe().GetBool("system_probe_config.collect_dns_domains"), } } diff --git a/pkg/network/encoding/marshal/dns_test.go b/pkg/network/encoding/marshal/dns_test.go index c4cc2ee5b414d..18ee7b250947a 100644 --- a/pkg/network/encoding/marshal/dns_test.go +++ b/pkg/network/encoding/marshal/dns_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/dns" "github.com/DataDog/datadog-agent/pkg/process/util" @@ -50,8 +50,8 @@ func TestFormatConnectionDNS(t *testing.T) { } t.Run("DNS with collect_domains_enabled=true,enable_dns_by_querytype=false", func(t *testing.T) { - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", true) - config.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", false) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", true) + pkgconfigsetup.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", false) ipc := make(ipCache) formatter := newDNSFormatter(payload, ipc) @@ -80,8 +80,8 @@ func TestFormatConnectionDNS(t *testing.T) { }) t.Run("DNS with collect_domains_enabled=true,enable_dns_by_querytype=true", func(t *testing.T) { - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", true) - config.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", true) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", true) + pkgconfigsetup.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", true) ipc := make(ipCache) formatter := newDNSFormatter(payload, ipc) diff --git a/pkg/network/encoding/marshal/modeler.go b/pkg/network/encoding/marshal/modeler.go index 22d3c9b77c8a9..f365ed34d91b5 100644 --- a/pkg/network/encoding/marshal/modeler.go +++ b/pkg/network/encoding/marshal/modeler.go @@ -10,7 +10,7 @@ import ( model "github.com/DataDog/agent-payload/v5/process" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/network" ) @@ -64,9 +64,10 @@ func (c *ConnectionsModeler) Close() { func (c *ConnectionsModeler) modelConnections(builder *model.ConnectionsBuilder, conns *network.Connections) { cfgOnce.Do(func() { agentCfg = &model.AgentConfiguration{ - NpmEnabled: config.SystemProbe().GetBool("network_config.enabled"), - UsmEnabled: config.SystemProbe().GetBool("service_monitoring_config.enabled"), - CcmEnabled: 
config.SystemProbe().GetBool("ccm_network_config.enabled"), + NpmEnabled: pkgconfigsetup.SystemProbe().GetBool("network_config.enabled"), + UsmEnabled: pkgconfigsetup.SystemProbe().GetBool("service_monitoring_config.enabled"), + CcmEnabled: pkgconfigsetup.SystemProbe().GetBool("ccm_network_config.enabled"), + CsmEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.enabled"), } }) @@ -86,6 +87,7 @@ func (c *ConnectionsModeler) modelConnections(builder *model.ConnectionsBuilder, w.SetNpmEnabled(agentCfg.NpmEnabled) w.SetUsmEnabled(agentCfg.UsmEnabled) w.SetCcmEnabled(agentCfg.CcmEnabled) + w.SetCsmEnabled(agentCfg.CsmEnabled) }) for _, d := range c.dnsFormatter.Domains() { builder.AddDomains(d) diff --git a/pkg/network/encoding/marshal/modeler_test.go b/pkg/network/encoding/marshal/modeler_test.go new file mode 100644 index 0000000000000..bac1f11891670 --- /dev/null +++ b/pkg/network/encoding/marshal/modeler_test.go @@ -0,0 +1,67 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package marshal + +import ( + "strconv" + "sync" + "testing" + + model "github.com/DataDog/agent-payload/v5/process" + "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/pkg/config/mock" + "github.com/DataDog/datadog-agent/pkg/network" +) + +func TestConnectionModelerAgentConfiguration(t *testing.T) { + tests := []struct { + npm, usm, ccm, csm bool + }{ + {false, false, false, false}, + {false, false, true, false}, + {false, true, false, false}, + {false, true, true, false}, + {true, false, false, false}, + {true, false, true, false}, + {true, true, false, false}, + {true, true, true, false}, + {false, false, false, true}, + {false, false, true, true}, + {false, true, false, true}, + {false, true, true, true}, + {true, false, false, true}, + {true, false, true, true}, + {true, true, false, true}, + {true, true, true, true}, + } + + for _, te := range tests { + t.Run("", func(t *testing.T) { + t.Setenv("DD_SYSTEM_PROBE_NETWORK_ENABLED", strconv.FormatBool(te.npm)) + t.Setenv("DD_SYSTEM_PROBE_SERVICE_MONITORING_ENABLED", strconv.FormatBool(te.usm)) + t.Setenv("DD_CCM_NETWORK_CONFIG_ENABLED", strconv.FormatBool(te.ccm)) + t.Setenv("DD_RUNTIME_SECURITY_CONFIG_ENABLED", strconv.FormatBool(te.csm)) + mock.NewSystemProbe(t) + cfgOnce = sync.Once{} + conns := &network.Connections{} + mod := NewConnectionsModeler(conns) + streamer := NewProtoTestStreamer[*model.Connections]() + builder := model.NewConnectionsBuilder(streamer) + expected := &model.AgentConfiguration{ + CcmEnabled: te.ccm, + CsmEnabled: te.csm, + UsmEnabled: te.usm, + NpmEnabled: te.npm, + } + + mod.modelConnections(builder, conns) + + actual := streamer.Unwrap(t, &model.Connections{}) + assert.Equal(t, expected, actual.AgentConfiguration) + }) + } +} diff --git a/pkg/network/encoding/marshal/usm.go b/pkg/network/encoding/marshal/usm.go index a9650e83b5778..bcae7a12ecc12 100644 --- a/pkg/network/encoding/marshal/usm.go +++ b/pkg/network/encoding/marshal/usm.go @@ -11,7 +11,7 @@ import ( "github.com/cihub/seelog" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry" "github.com/DataDog/datadog-agent/pkg/network/types" @@ -56,7 +56,7 @@ func 
GroupByConnection[K comparable, V any](protocol string, data map[K]V, keyGe lookupFn: USMLookup[K, V], // Experimental: Connection Rollups - enableConnectionRollup: config.SystemProbe().GetBool("service_monitoring_config.enable_connection_rollup"), + enableConnectionRollup: pkgconfigsetup.SystemProbe().GetBool("service_monitoring_config.enable_connection_rollup"), } // The map intended to calculate how many entries we actually need in byConnection.data, and for each entry diff --git a/pkg/network/encoding/marshal/usm_postgres.go b/pkg/network/encoding/marshal/usm_postgres.go index ea7ed3a10659d..c1b21b9e1dd68 100644 --- a/pkg/network/encoding/marshal/usm_postgres.go +++ b/pkg/network/encoding/marshal/usm_postgres.go @@ -62,7 +62,7 @@ func (e *postgresEncoder) encodeData(connectionData *USMConnectionData[postgres. staticTags |= stats.StaticTags e.postgresAggregationsBuilder.AddAggregations(func(builder *model.DatabaseStatsBuilder) { builder.SetPostgres(func(statsBuilder *model.PostgresStatsBuilder) { - statsBuilder.SetTableName(key.TableName) + statsBuilder.SetTableName(key.Parameters) statsBuilder.SetOperation(uint64(toPostgresModelOperation(key.Operation))) if latencies := stats.Latencies; latencies != nil { blob, _ := proto.Marshal(latencies.ToProto()) @@ -106,6 +106,8 @@ func toPostgresModelOperation(op postgres.Operation) model.PostgresOperation { return model.PostgresOperation_PostgresAlterOp case postgres.TruncateTableOP: return model.PostgresOperation_PostgresTruncateOp + case postgres.ShowOP: + return model.PostgresOperation_PostgresShowOp default: return model.PostgresOperation_PostgresUnknownOp } diff --git a/pkg/network/event_common.go b/pkg/network/event_common.go index 6908b02965048..990c9f40797b7 100644 --- a/pkg/network/event_common.go +++ b/pkg/network/event_common.go @@ -10,6 +10,7 @@ package network import ( "encoding/binary" "fmt" + "net/netip" "strings" "time" @@ -353,10 +354,8 @@ const keyFmt = "p:%d|src:%s:%d|dst:%s:%d|f:%d|t:%d" // Note: This is only used in /debug/* endpoints func BeautifyKey(key string) string { bytesToAddress := func(buf []byte) util.Address { - if len(buf) == 4 { - return util.V4AddressFromBytes(buf) - } - return util.V6AddressFromBytes(buf) + addr, _ := netip.AddrFromSlice(buf) + return util.Address{Addr: addr} } raw := []byte(key) @@ -464,8 +463,8 @@ func generateConnectionKey(c ConnectionStats, buf []byte, useNAT bool) []byte { buf[n] = uint8(c.Family)<<4 | uint8(c.Type) n++ - n += laddr.WriteTo(buf[n:]) // 4 or 16 bytes - n += raddr.WriteTo(buf[n:]) // 4 or 16 bytes + n += copy(buf[n:], laddr.AsSlice()) // 4 or 16 bytes + n += copy(buf[n:], raddr.AsSlice()) // 4 or 16 bytes return buf[:n] } diff --git a/pkg/network/event_windows.go b/pkg/network/event_windows.go index ff9bed37d33f9..54ad5399f8c63 100644 --- a/pkg/network/event_windows.go +++ b/pkg/network/event_windows.go @@ -8,7 +8,7 @@ package network import ( - "net" + "net/netip" "syscall" "github.com/DataDog/datadog-agent/pkg/network/driver" @@ -57,14 +57,14 @@ func isTCPFlowEstablished(flow *driver.PerFlowData) bool { return false } -func convertV4Addr(addr [16]uint8) util.Address { +func convertV4Addr(addr [16]byte) util.Address { // We only read the first 4 bytes for v4 address - return util.V4AddressFromBytes(addr[:net.IPv4len]) + return util.Address{Addr: netip.AddrFrom4([4]byte(addr[:]))} } -func convertV6Addr(addr [16]uint8) util.Address { +func convertV6Addr(addr [16]byte) util.Address { // We read all 16 bytes for v6 address - return util.V6AddressFromBytes(addr[:net.IPv6len]) + 
return util.Address{Addr: netip.AddrFrom16(addr)} } // Monotonic values include retransmits and headers, while transport does not. We default to using transport diff --git a/pkg/network/events/monitor.go b/pkg/network/events/monitor.go index e06ddee483ea8..698bed3651681 100644 --- a/pkg/network/events/monitor.go +++ b/pkg/network/events/monitor.go @@ -147,6 +147,7 @@ func (h *eventConsumerWrapper) Copy(ev *model.Event) any { } } } + if len(tagsFound) < len(envFilter) { apmTags := getAPMTags(tagsFound, ev.GetExecFilePath()) if len(apmTags) > 0 { diff --git a/pkg/network/events/monitor_windows.go b/pkg/network/events/monitor_windows.go index 3698e001eb40f..e766e0a4bfa0f 100644 --- a/pkg/network/events/monitor_windows.go +++ b/pkg/network/events/monitor_windows.go @@ -62,7 +62,7 @@ func getAPMTags(already map[string]struct{}, filename string) []*intern.Value { tags := make([]*intern.Value, 0, 3) // see if there's an app.config in the directory - appConfig := filepath.Join(dir, "app.config") + appConfig := filename + ".config" ddJSON := filepath.Join(dir, "datadog.json") if _, err := os.Stat(appConfig); err == nil { @@ -74,7 +74,7 @@ func getAPMTags(already map[string]struct{}, filename string) []*intern.Value { } } } else if !errors.Is(err, os.ErrNotExist) { - log.Warnf("Error reading app.config: %v", err) + log.Warnf("Error reading app.config: %s %v", appConfig, err) } if len(already) == len(envFilter) { // we've seen all we need, no point in looking in datadog.json diff --git a/pkg/network/gateway_lookup_linux.go b/pkg/network/gateway_lookup_linux.go index 48657b61c1ece..9236bcd6bb8a9 100644 --- a/pkg/network/gateway_lookup_linux.go +++ b/pkg/network/gateway_lookup_linux.go @@ -18,7 +18,7 @@ import ( telemetryComponent "github.com/DataDog/datadog-agent/comp/core/telemetry" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/process/util" "github.com/DataDog/datadog-agent/pkg/telemetry" @@ -73,7 +73,7 @@ func init() { func gwLookupEnabled() bool { // only enabled on AWS currently - return Cloud.IsAWS() && ddconfig.IsCloudProviderEnabled(ec2.CloudProviderName) + return Cloud.IsAWS() && pkgconfigsetup.IsCloudProviderEnabled(ec2.CloudProviderName, pkgconfigsetup.Datadog()) } // NewGatewayLookup creates a new instance of a gateway lookup using @@ -131,7 +131,7 @@ func (g *gatewayLookup) LookupWithIPs(source util.Address, dest util.Address, ne // if there is no gateway, we don't need to add subnet info // for gateway resolution in the backend - if r.Gateway.IsZero() || r.Gateway.IsUnspecified() { + if !r.Gateway.IsValid() || r.Gateway.IsUnspecified() { return nil } diff --git a/pkg/network/go/bininspect/pclntab.go b/pkg/network/go/bininspect/pclntab.go new file mode 100644 index 0000000000000..be68da2ad03bb --- /dev/null +++ b/pkg/network/go/bininspect/pclntab.go @@ -0,0 +1,307 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
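Editor's note: several hunks above migrate address handling to the standard library's net/netip: util.Address now carries a netip.Addr directly, byte-slice constructors become netip.AddrFrom4 / AddrFrom16 / AddrFromSlice, WriteTo becomes copy(..., AsSlice()), and the old IsZero() checks become !IsValid(). A quick standard-library sketch of the calls involved:

    package main

    import (
        "fmt"
        "net/netip"
    )

    func main() {
        b := []byte{10, 1, 2, 3}

        // Fixed-size conversion for IPv4 (slice-to-array conversion needs Go 1.20+).
        v4 := netip.AddrFrom4([4]byte(b))

        // Length-dispatching conversion: accepts 4- or 16-byte slices.
        fromSlice, ok := netip.AddrFromSlice(b)

        fmt.Println(v4, fromSlice, ok) // 10.1.2.3 10.1.2.3 true
        fmt.Println(v4.AsSlice())      // [10 1 2 3]

        var zero netip.Addr
        fmt.Println(zero.IsValid())    // false; replaces the old IsZero() check
        fmt.Println(v4.IsUnspecified()) // false
    }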
+ +//go:build linux + +package bininspect + +import ( + "bytes" + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" +) + +const ( + pclntabSectionName = ".gopclntab" + + go116magic = 0xfffffffa + go118magic = 0xfffffff0 + go120magic = 0xfffffff1 +) + +// version of the pclntab +type version int + +const ( + verUnknown version = iota + ver11 + ver12 + ver116 + ver118 + ver120 +) + +var ( + // ErrMissingPCLNTABSection is returned when the pclntab section is missing. + ErrMissingPCLNTABSection = errors.New("failed to find pclntab section") + + // ErrUnsupportedPCLNTABVersion is returned when the pclntab version is not supported. + ErrUnsupportedPCLNTABVersion = errors.New("unsupported pclntab version") + + // ErrFailedToFindAllSymbols is returned when not all symbols were found. + ErrFailedToFindAllSymbols = errors.New("failed to find all symbols") +) + +// sectionAccess is a wrapper around elf.Section to provide ReadAt functionality. +// This is used to lazy read from the pclntab section, as the pclntab is large and we don't want to read it all at once, +// or store it in memory. +type sectionAccess struct { + section *elf.Section + baseOffset int64 +} + +// ReadAt reads len(p) bytes from the section starting at the given offset. +func (s *sectionAccess) ReadAt(outBuffer []byte, offset int64) (int, error) { + return s.section.ReadAt(outBuffer, s.baseOffset+offset) +} + +// pclntanSymbolParser is a parser for pclntab symbols. +// Similar to LineTable struct in https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L43 +type pclntanSymbolParser struct { + // section is the pclntab section. + section *elf.Section + // symbolFilter is the filter for the symbols. + symbolFilter symbolFilter + + // byteOrderParser is the binary.ByteOrder for the pclntab. + byteOrderParser binary.ByteOrder + // cachedVersion is the version of the pclntab. + cachedVersion version + // funcNameTable is the sectionAccess for the function name table. + funcNameTable sectionAccess + // funcData is the sectionAccess for the function data. + funcData sectionAccess + // funcTable is the sectionAccess for the function table. + funcTable sectionAccess + // funcTableSize is the size of the function table. + funcTableSize uint32 + // ptrSize is the size of a pointer in the architecture of the binary. + ptrSize uint32 + // ptrBufferSizeHelper is a buffer for reading pointers of the size ptrSize. + ptrBufferSizeHelper []byte + // funcNameHelper is a buffer for reading function names. Of the maximum size of the symbol names. + funcNameHelper []byte + // funcTableFieldSize is the size of a field in the function table. + funcTableFieldSize int + // funcTableBuffer is a buffer for reading fields in the function table. + funcTableBuffer []byte +} + +// GetPCLNTABSymbolParser returns the matching symbols from the pclntab section. +func GetPCLNTABSymbolParser(f *elf.File, symbolFilter symbolFilter) (map[string]*elf.Symbol, error) { + section := f.Section(pclntabSectionName) + if section == nil { + return nil, ErrMissingPCLNTABSection + } + + parser := &pclntanSymbolParser{section: section, symbolFilter: symbolFilter} + + if err := parser.parsePclntab(); err != nil { + return nil, err + } + // Late initialization, to prevent allocation if the binary is not supported. 
+ _, maxSymbolsSize := symbolFilter.getMinMaxLength() + parser.funcNameHelper = make([]byte, maxSymbolsSize) + parser.funcTableFieldSize = getFuncTableFieldSize(parser.cachedVersion, int(parser.ptrSize)) + // Allocate the buffer for reading the function table. + // TODO: Do we need 2*funcTableFieldSize? + parser.funcTableBuffer = make([]byte, 2*parser.funcTableFieldSize) + return parser.getSymbols() +} + +// parsePclntab parses the pclntab, setting the version and verifying the header. +// Based on parsePclnTab in https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L194 +func (p *pclntanSymbolParser) parsePclntab() error { + p.cachedVersion = ver11 + + pclntabHeader := make([]byte, 8) + if n, err := p.section.ReadAt(pclntabHeader, 0); err != nil || n != len(pclntabHeader) { + return fmt.Errorf("failed to read pclntab header: %w", err) + } + // Matching the condition https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L216-L220 + // Check header: 4-byte magic, two zeros, pc quantum, pointer size. + if p.section.Size < 16 || pclntabHeader[4] != 0 || pclntabHeader[5] != 0 || + (pclntabHeader[6] != 1 && pclntabHeader[6] != 2 && pclntabHeader[6] != 4) || // pc quantum + (pclntabHeader[7] != 4 && pclntabHeader[7] != 8) { // pointer size + // TODO: add explicit error message + return errors.New("invalid pclntab header") + } + + leMagic := binary.LittleEndian.Uint32(pclntabHeader) + beMagic := binary.BigEndian.Uint32(pclntabHeader) + switch { + case leMagic == go116magic: + p.byteOrderParser, p.cachedVersion = binary.LittleEndian, ver116 + case beMagic == go116magic: + p.byteOrderParser, p.cachedVersion = binary.BigEndian, ver116 + case leMagic == go118magic: + p.byteOrderParser, p.cachedVersion = binary.LittleEndian, ver118 + case beMagic == go118magic: + p.byteOrderParser, p.cachedVersion = binary.BigEndian, ver118 + case leMagic == go120magic: + p.byteOrderParser, p.cachedVersion = binary.LittleEndian, ver120 + case beMagic == go120magic: + p.byteOrderParser, p.cachedVersion = binary.BigEndian, ver120 + default: + return ErrUnsupportedPCLNTABVersion + } + + p.ptrSize = uint32(pclntabHeader[7]) + p.ptrBufferSizeHelper = make([]byte, p.ptrSize) + + // offset is based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L252 + offset := func(word uint32) uint64 { + off := 8 + word*p.ptrSize + if n, err := p.section.ReadAt(p.ptrBufferSizeHelper, int64(off)); err != nil || n != int(p.ptrSize) { + return 0 + } + return p.uintptr(p.ptrBufferSizeHelper) + } + + switch p.cachedVersion { + case ver118, ver120: + p.funcTableSize = uint32(offset(0)) + p.funcNameTable = sectionAccess{ + section: p.section, + baseOffset: int64(offset(3)), + } + p.funcData = sectionAccess{ + section: p.section, + baseOffset: int64(offset(7)), + } + p.funcTable = sectionAccess{ + section: p.section, + baseOffset: int64(offset(7)), + } + case ver116: + p.funcTableSize = uint32(offset(0)) + p.funcNameTable = sectionAccess{ + section: p.section, + baseOffset: int64(offset(2)), + } + p.funcData = sectionAccess{ + section: p.section, + baseOffset: int64(offset(6)), + } + p.funcTable = sectionAccess{ + section: p.section, + baseOffset: int64(offset(6)), + } + } + + return nil +} + +// uintptr returns the pointer-sized value encoded at b. +// The pointer size is dictated by the table being read. 
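Editor's note: the new pclntab.go above walks the .gopclntab section directly, presumably so that function symbols can still be recovered when the ELF symbol table is unavailable. Its header check follows the layout used by debug/gosym: a 4-byte magic selecting the format version and byte order, two zero bytes, the pc quantum, and the pointer size. A condensed sketch of just that detection step (little-endian magics only; the sample header bytes are illustrative):

    package main

    import (
        "encoding/binary"
        "errors"
        "fmt"
    )

    // Magic numbers as defined in the pclntab hunk above.
    const (
        go116magic = 0xfffffffa
        go118magic = 0xfffffff0
        go120magic = 0xfffffff1
    )

    // parseHeader mimics the version detection: 4-byte magic, two zero bytes,
    // the pc quantum (1, 2 or 4), then the pointer size (4 or 8). The real
    // parser also checks the big-endian interpretation of the magic.
    func parseHeader(h [8]byte) (goVersion string, ptrSize int, err error) {
        if h[4] != 0 || h[5] != 0 ||
            (h[6] != 1 && h[6] != 2 && h[6] != 4) ||
            (h[7] != 4 && h[7] != 8) {
            return "", 0, errors.New("invalid pclntab header")
        }
        switch binary.LittleEndian.Uint32(h[:4]) {
        case go116magic:
            goVersion = "1.16"
        case go118magic:
            goVersion = "1.18"
        case go120magic:
            goVersion = "1.20"
        default:
            return "", 0, errors.New("unsupported pclntab magic")
        }
        return goVersion, int(h[7]), nil
    }

    func main() {
        // Illustrative header for a little-endian, Go 1.20, 64-bit binary.
        hdr := [8]byte{0xf1, 0xff, 0xff, 0xff, 0, 0, 1, 8}
        ver, ptr, err := parseHeader(hdr)
        fmt.Println(ver, ptr, err) // 1.20 8 <nil>
    }

Once the version is known, the words after the header are offsets into the section, which is what the funcNameTable, funcData, and funcTable accessors in the hunk capture.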
+// based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L186. +func (p *pclntanSymbolParser) uintptr(b []byte) uint64 { + if p.ptrSize == 4 { + return uint64(p.byteOrderParser.Uint32(b)) + } + return p.byteOrderParser.Uint64(b) +} + +// getFuncTableFieldSize returns the size of a field in the function table. +// based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L388-L392 +func getFuncTableFieldSize(version version, ptrSize int) int { + if version >= ver118 { + return 4 + } + return ptrSize +} + +// getSymbols returns the symbols from the pclntab section that match the symbol filter. +// based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L300-L329 +func (p *pclntanSymbolParser) getSymbols() (map[string]*elf.Symbol, error) { + numWanted := p.symbolFilter.getNumWanted() + symbols := make(map[string]*elf.Symbol, numWanted) + data := sectionAccess{section: p.section} + for currentIdx := uint32(0); currentIdx < p.funcTableSize; currentIdx++ { + // based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L315 + _, err := p.funcTable.ReadAt(p.funcTableBuffer, int64((2*currentIdx+1)*uint32(p.funcTableFieldSize))) + if err != nil { + continue + } + + // based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L321 + data.baseOffset = int64(p.uint(p.funcTableBuffer)) + p.funcData.baseOffset + funcName := p.funcName(data) + + if funcName == "" { + continue + } + symbols[funcName] = &elf.Symbol{ + Name: funcName, + } + if len(symbols) == numWanted { + break + } + } + if len(symbols) < numWanted { + return symbols, ErrFailedToFindAllSymbols + } + return symbols, nil +} + +// funcName returns the name of the function found at off. +func (p *pclntanSymbolParser) funcName(data sectionAccess) string { + off := funcNameOffset(p.ptrSize, p.cachedVersion, p.byteOrderParser, data, p.ptrBufferSizeHelper) + n, err := p.funcNameTable.ReadAt(p.funcNameHelper, int64(off)) + if n == 0 || (err != nil && !errors.Is(err, io.EOF)) { + return "" + } + idxToNull := bytes.IndexByte(p.funcNameHelper, 0) + if idxToNull == -1 || idxToNull == 0 || idxToNull >= n { + return "" + } + + if p.symbolFilter.want(string(p.funcNameHelper[:idxToNull])) { + return string(p.funcNameHelper[:idxToNull]) + } + return "" +} + +// uint returns the uint stored at b. +// based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L427-L432 +func (p *pclntanSymbolParser) uint(b []byte) uint64 { + if p.funcTableFieldSize == 4 { + return uint64(p.byteOrderParser.Uint32(b)) + } + return p.byteOrderParser.Uint64(b) +} + +// funcNameOffset returns the offset of the function name. +// based on https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L472-L485 +// We can only for the usage of this function for getting the name of the function (https://github.com/golang/go/blob/6a861010be9eed02d5285509cbaf3fb26d2c5041/src/debug/gosym/pclntab.go#L463) +// So we explicitly set `n = 1` in the original implementation. +func funcNameOffset(ptrSize uint32, version version, binary binary.ByteOrder, data sectionAccess, helper []byte) uint32 { + // In Go 1.18, the struct _func has changed. 
The original (prior to 1.18) was: + // type _func struct { + // entry uintptr + // nameoff int32 + // ... + // } + // In Go 1.18, the struct is: + // type _func struct { + // entryoff uint32 + // nameoff int32 + // ... + // } + // Thus, to read the nameoff, for Go 1.18 and later, we need to skip the entryoff field (4 bytes). + // for Go 1.17 and earlier, We need to skip the sizeof(uintptr) which is ptrSize. + off := ptrSize + if version >= ver118 { + off = 4 + } + // We read only 4 bytes, as the nameoff is an int32. + if n, err := data.ReadAt(helper[:4], int64(off)); err != nil || n != 4 { + return 0 + } + return binary.Uint32(helper[:4]) +} diff --git a/pkg/network/go/bininspect/symbols.go b/pkg/network/go/bininspect/symbols.go index 90dd50c6932d1..910bc37d3ff30 100644 --- a/pkg/network/go/bininspect/symbols.go +++ b/pkg/network/go/bininspect/symbols.go @@ -281,3 +281,19 @@ func GetAnySymbolWithPrefix(elfFile *elf.File, prefix string, maxLength int) (*e // Shouldn't happen return nil, errors.New("empty symbols map") } + +// GetAnySymbolWithPrefixPCLNTAB returns any one symbol with the given prefix and the +// specified maximum length from the pclntab section in ELF file. +func GetAnySymbolWithPrefixPCLNTAB(elfFile *elf.File, prefix string, maxLength int) (*elf.Symbol, error) { + symbols, err := GetPCLNTABSymbolParser(elfFile, newPrefixSymbolFilter(prefix, maxLength)) + if err != nil { + return nil, err + } + + for key := range symbols { + return symbols[key], nil + } + + // Shouldn't happen + return nil, errors.New("empty symbols map") +} diff --git a/pkg/network/nat.go b/pkg/network/nat.go index ef5194b23ffd5..e686c65ea7d83 100644 --- a/pkg/network/nat.go +++ b/pkg/network/nat.go @@ -14,7 +14,7 @@ func GetNATLocalAddress(c ConnectionStats) (util.Address, uint16) { localIP := c.Source localPort := c.SPort - if c.IPTranslation != nil && !c.IPTranslation.ReplDstIP.IsZero() { + if c.IPTranslation != nil && c.IPTranslation.ReplDstIP.IsValid() { // Fields are flipped localIP = c.IPTranslation.ReplDstIP localPort = c.IPTranslation.ReplDstPort @@ -27,7 +27,7 @@ func GetNATRemoteAddress(c ConnectionStats) (util.Address, uint16) { remoteIP := c.Dest remotePort := c.DPort - if c.IPTranslation != nil && !c.IPTranslation.ReplSrcIP.IsZero() { + if c.IPTranslation != nil && c.IPTranslation.ReplSrcIP.IsValid() { // Fields are flipped remoteIP = c.IPTranslation.ReplSrcIP remotePort = c.IPTranslation.ReplSrcPort diff --git a/pkg/network/netlink/conntracker.go b/pkg/network/netlink/conntracker.go index 9f066d2f8cb97..885e4f5900b82 100644 --- a/pkg/network/netlink/conntracker.go +++ b/pkg/network/netlink/conntracker.go @@ -12,7 +12,6 @@ import ( "context" "errors" "fmt" - "net" "net/netip" "sync" "time" @@ -194,8 +193,8 @@ func (ctr *realConntracker) GetTranslationForConn(c *network.ConnectionStats) *n defer ctr.Unlock() k := connKey{ - src: netip.AddrPortFrom(ipFromAddr(c.Source), c.SPort), - dst: netip.AddrPortFrom(ipFromAddr(c.Dest), c.DPort), + src: netip.AddrPortFrom(c.Source.Addr, c.SPort), + dst: netip.AddrPortFrom(c.Dest.Addr, c.DPort), transport: c.Type, } @@ -226,8 +225,8 @@ func (ctr *realConntracker) DeleteTranslation(c *network.ConnectionStats) { defer ctr.Unlock() k := connKey{ - src: netip.AddrPortFrom(ipFromAddr(c.Source), c.SPort), - dst: netip.AddrPortFrom(ipFromAddr(c.Dest), c.DPort), + src: netip.AddrPortFrom(c.Source.Addr, c.SPort), + dst: netip.AddrPortFrom(c.Dest.Addr, c.DPort), transport: c.Type, } @@ -453,29 +452,13 @@ func IsNAT(c Con) bool { func formatIPTranslation(tuple 
*ConTuple) *network.IPTranslation { return &network.IPTranslation{ - ReplSrcIP: addrFromIP(tuple.Src.Addr()), - ReplDstIP: addrFromIP(tuple.Dst.Addr()), + ReplSrcIP: util.Address{Addr: tuple.Src.Addr().Unmap()}, + ReplDstIP: util.Address{Addr: tuple.Dst.Addr().Unmap()}, ReplSrcPort: tuple.Src.Port(), ReplDstPort: tuple.Dst.Port(), } } -func addrFromIP(ip netip.Addr) util.Address { - if ip.Is6() && !ip.Is4In6() { - b := ip.As16() - return util.V6AddressFromBytes(b[:]) - } - b := ip.As4() - return util.V4AddressFromBytes(b[:]) -} - -func ipFromAddr(a util.Address) netip.Addr { - if a.Len() == net.IPv6len { - return netip.AddrFrom16(*(*[16]byte)(a.Bytes())) - } - return netip.AddrFrom4(*(*[4]byte)(a.Bytes())) -} - func formatKey(tuple *ConTuple) (k connKey, ok bool) { ok = true k.src = tuple.Src diff --git a/pkg/network/netlink/decoding.go b/pkg/network/netlink/decoding.go index 003420fb3ca90..aff7d90466fd6 100644 --- a/pkg/network/netlink/decoding.go +++ b/pkg/network/netlink/decoding.go @@ -201,12 +201,12 @@ func ipv4(b []byte) (netip.Addr, error) { if len(b) != 4 { return netip.Addr{}, fmt.Errorf("invalid IPv4 size") } - return netip.AddrFrom4(*(*[4]byte)(b)), nil + return netip.AddrFrom4([4]byte(b)), nil } func ipv6(b []byte) (netip.Addr, error) { if len(b) != 16 { return netip.Addr{}, fmt.Errorf("invalid IPv6 size") } - return netip.AddrFrom16(*(*[16]byte)(b)), nil + return netip.AddrFrom16([16]byte(b)), nil } diff --git a/pkg/network/nettop/main.go b/pkg/network/nettop/main.go index ad28dda34b871..1fb0f4da992ff 100644 --- a/pkg/network/nettop/main.go +++ b/pkg/network/nettop/main.go @@ -14,7 +14,7 @@ import ( "syscall" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/network" networkConfig "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/tracer" @@ -29,8 +29,8 @@ func main() { os.Exit(1) } - config.Datadog().SetConfigFile(*cfgpath) - if _, err := config.LoadWithoutSecret(); err != nil { + pkgconfigsetup.Datadog().SetConfigFile(*cfgpath) + if _, err := pkgconfigsetup.LoadWithoutSecret(pkgconfigsetup.Datadog(), nil); err != nil { fmt.Fprintf(os.Stderr, "%s\n", err) os.Exit(1) } diff --git a/pkg/network/protocols/ebpf_types.go b/pkg/network/protocols/ebpf_types.go index ab293e741290b..9fcf544843b86 100644 --- a/pkg/network/protocols/ebpf_types.go +++ b/pkg/network/protocols/ebpf_types.go @@ -47,17 +47,17 @@ const ( ProgramHTTP2Termination ProgramType = C.PROG_HTTP2_TERMINATION // ProgramKafka is the Golang representation of the C.PROG_KAFKA enum ProgramKafka ProgramType = C.PROG_KAFKA - // ProgramKafkaResponsePartitionParserV0 is the Golang representation of the C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_v0 enum - ProgramKafkaResponsePartitionParserV0 ProgramType = C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_V0 - // ProgramKafkaResponsePartitionParserV12 is the Golang representation of the C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_v0 enum - ProgramKafkaResponsePartitionParserV12 ProgramType = C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_V12 - // ProgramKafkaResponseRecordBatchParserV0 is the Golang representation of the C.PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_v0 enum - ProgramKafkaResponseRecordBatchParserV0 ProgramType = C.PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V0 - // ProgramKafkaResponseRecordBatchParserV12 is the Golang representation of the C.PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_v0 enum - ProgramKafkaResponseRecordBatchParserV12 
ProgramType = C.PROG_KAFKA_RESPONSE_RECORD_BATCH_PARSER_V12 - // ProgramKafkaProduceResponsePartitionParserV0 is the Golang representation of the C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_v0 enum + // ProgramKafkaFetchResponsePartitionParserV0 is the Golang representation of the C.PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V0 enum + ProgramKafkaFetchResponsePartitionParserV0 ProgramType = C.PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V0 + // ProgramKafkaFetchResponsePartitionParserV12 is the Golang representation of the C.PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V12 enum + ProgramKafkaFetchResponsePartitionParserV12 ProgramType = C.PROG_KAFKA_FETCH_RESPONSE_PARTITION_PARSER_V12 + // ProgramKafkaFetchResponseRecordBatchParserV0 is the Golang representation of the C.PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V0 enum + ProgramKafkaFetchResponseRecordBatchParserV0 ProgramType = C.PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V0 + // ProgramKafkaFetchResponseRecordBatchParserV12 is the Golang representation of the C.PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V12 enum + ProgramKafkaFetchResponseRecordBatchParserV12 ProgramType = C.PROG_KAFKA_FETCH_RESPONSE_RECORD_BATCH_PARSER_V12 + // ProgramKafkaProduceResponsePartitionParserV0 is the Golang representation of the C.PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V0 enum ProgramKafkaProduceResponsePartitionParserV0 ProgramType = C.PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V0 - // ProgramKafkaProduceResponsePartitionParserV9 is the Golang representation of the C.PROG_KAFKA_RESPONSE_PARTITION_PARSER_v0 enum + // ProgramKafkaProduceResponsePartitionParserV9 is the Golang representation of the C.PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V9 enum ProgramKafkaProduceResponsePartitionParserV9 ProgramType = C.PROG_KAFKA_PRODUCE_RESPONSE_PARTITION_PARSER_V9 // ProgramKafkaTermination is tail call to process Kafka termination. 
ProgramKafkaTermination ProgramType = C.PROG_KAFKA_TERMINATION diff --git a/pkg/network/protocols/ebpf_types_linux.go b/pkg/network/protocols/ebpf_types_linux.go index 24e84cd1b3511..9cc859e489174 100644 --- a/pkg/network/protocols/ebpf_types_linux.go +++ b/pkg/network/protocols/ebpf_types_linux.go @@ -36,13 +36,13 @@ const ( ProgramKafka ProgramType = 0x9 - ProgramKafkaResponsePartitionParserV0 ProgramType = 0xa + ProgramKafkaFetchResponsePartitionParserV0 ProgramType = 0xa - ProgramKafkaResponsePartitionParserV12 ProgramType = 0xb + ProgramKafkaFetchResponsePartitionParserV12 ProgramType = 0xb - ProgramKafkaResponseRecordBatchParserV0 ProgramType = 0xc + ProgramKafkaFetchResponseRecordBatchParserV0 ProgramType = 0xc - ProgramKafkaResponseRecordBatchParserV12 ProgramType = 0xd + ProgramKafkaFetchResponseRecordBatchParserV12 ProgramType = 0xd ProgramKafkaProduceResponsePartitionParserV0 ProgramType = 0xe diff --git a/pkg/network/protocols/http/etw_http_service.go b/pkg/network/protocols/http/etw_http_service.go index ee913ae4b7c8d..8225553e059d3 100644 --- a/pkg/network/protocols/http/etw_http_service.go +++ b/pkg/network/protocols/http/etw_http_service.go @@ -1472,7 +1472,7 @@ func ipAndPortFromTup(tup driver.ConnTupleType, local bool) ([16]uint8, uint16) } func ip4format(ip [16]uint8) string { - ipObj := netip.AddrFrom4(*(*[4]byte)(ip[:4])) + ipObj := netip.AddrFrom4([4]byte(ip[:])) return ipObj.String() } diff --git a/pkg/network/protocols/http/model_linux.go b/pkg/network/protocols/http/model_linux.go index 4855bee8a00af..75034b59fb921 100644 --- a/pkg/network/protocols/http/model_linux.go +++ b/pkg/network/protocols/http/model_linux.go @@ -110,7 +110,7 @@ func (e *EbpfEvent) String() string { func requestFragment(fragment []byte) [BufferSize]byte { if len(fragment) >= BufferSize { - return *(*[BufferSize]byte)(fragment) + return [BufferSize]byte(fragment) } var b [BufferSize]byte copy(b[:], fragment) diff --git a/pkg/network/protocols/http/statkeeper_test_linux.go b/pkg/network/protocols/http/statkeeper_test_linux.go index ba8f1aa4308a8..3a4c59dfeb675 100644 --- a/pkg/network/protocols/http/statkeeper_test_linux.go +++ b/pkg/network/protocols/http/statkeeper_test_linux.go @@ -25,9 +25,9 @@ func generateIPv4HTTPTransaction(source util.Address, dest util.Address, sourceP event.Http.Response_last_seen = event.Http.Request_started + latencyNS event.Http.Response_status_code = uint16(code) event.Http.Request_fragment = requestFragment([]byte(reqFragment)) - event.Tuple.Saddr_l = uint64(binary.LittleEndian.Uint32(source.Bytes())) + event.Tuple.Saddr_l = uint64(binary.LittleEndian.Uint32(source.Unmap().AsSlice())) event.Tuple.Sport = uint16(sourcePort) - event.Tuple.Daddr_l = uint64(binary.LittleEndian.Uint32(dest.Bytes())) + event.Tuple.Daddr_l = uint64(binary.LittleEndian.Uint32(dest.Unmap().AsSlice())) event.Tuple.Dport = uint16(destPort) event.Tuple.Metadata = 1 diff --git a/pkg/network/protocols/http/statkeeper_test_windows.go b/pkg/network/protocols/http/statkeeper_test_windows.go index ea4fdc13f00bb..8516d667d41dd 100644 --- a/pkg/network/protocols/http/statkeeper_test_windows.go +++ b/pkg/network/protocols/http/statkeeper_test_windows.go @@ -25,11 +25,10 @@ func generateIPv4HTTPTransaction(source util.Address, dest util.Address, sourceP tx.Txn.ResponseStatusCode = uint16(code) tx.RequestFragment = []byte(reqFragment) - source.WriteTo(tx.Txn.Tup.RemoteAddr[:]) - + copy(tx.Txn.Tup.RemoteAddr[:], source.AsSlice()) tx.Txn.Tup.RemotePort = uint16(sourcePort) - 
dest.WriteTo(tx.Txn.Tup.LocalAddr[:]) + copy(tx.Txn.Tup.LocalAddr[:], dest.AsSlice()) tx.Txn.Tup.LocalPort = uint16(destPort) return &tx diff --git a/pkg/network/protocols/kafka/protocol.go b/pkg/network/protocols/kafka/protocol.go index 542d97367e9ce..41d0dc72b3854 100644 --- a/pkg/network/protocols/kafka/protocol.go +++ b/pkg/network/protocols/kafka/protocol.go @@ -40,12 +40,12 @@ const ( eventStreamName = "kafka" filterTailCall = "socket__kafka_filter" - responsePartitionParserV0TailCall = "socket__kafka_response_partition_parser_v0" - responsePartitionParserV12TailCall = "socket__kafka_response_partition_parser_v12" - responseRecordBatchParserV0TailCall = "socket__kafka_response_record_batch_parser_v0" - responseRecordBatchParserV12TailCall = "socket__kafka_response_record_batch_parser_v12" - ProduceResponsePartitionParserV0TailCall = "socket__kafka_produce_response_partition_parser_v0" - ProduceResponsePartitionParserV9TailCall = "socket__kafka_produce_response_partition_parser_v9" + fetchResponsePartitionParserV0TailCall = "socket__kafka_fetch_response_partition_parser_v0" + fetchResponsePartitionParserV12TailCall = "socket__kafka_fetch_response_partition_parser_v12" + fetchResponseRecordBatchParserV0TailCall = "socket__kafka_fetch_response_record_batch_parser_v0" + fetchResponseRecordBatchParserV12TailCall = "socket__kafka_fetch_response_record_batch_parser_v12" + produceResponsePartitionParserV0TailCall = "socket__kafka_produce_response_partition_parser_v0" + produceResponsePartitionParserV9TailCall = "socket__kafka_produce_response_partition_parser_v9" dispatcherTailCall = "socket__protocol_dispatcher_kafka" kafkaHeapMap = "kafka_heap" @@ -55,12 +55,12 @@ const ( tlsFilterTailCall = "uprobe__kafka_tls_filter" - tlsResponsePartitionParserV0TailCall = "uprobe__kafka_tls_response_partition_parser_v0" - tlsResponsePartitionParserV12TailCall = "uprobe__kafka_tls_response_partition_parser_v12" - tlsResponseRecordBatchParserV0TailCall = "uprobe__kafka_tls_response_record_batch_parser_v0" - tlsResponseRecordBatchParserV12TailCall = "uprobe__kafka_tls_response_record_batch_parser_v12" - tlsProduceResponsePartitionParserV0TailCall = "uprobe__kafka_tls_produce_response_partition_parser_v0" - tlsProduceResponsePartitionParserV9TailCall = "uprobe__kafka_tls_produce_response_partition_parser_v9" + tlsFetchResponsePartitionParserV0TailCall = "uprobe__kafka_tls_fetch_response_partition_parser_v0" + tlsFetchResponsePartitionParserV12TailCall = "uprobe__kafka_tls_fetch_response_partition_parser_v12" + tlsFetchResponseRecordBatchParserV0TailCall = "uprobe__kafka_tls_fetch_response_record_batch_parser_v0" + tlsFetchResponseRecordBatchParserV12TailCall = "uprobe__kafka_tls_fetch_response_record_batch_parser_v12" + tlsProduceResponsePartitionParserV0TailCall = "uprobe__kafka_tls_produce_response_partition_parser_v0" + tlsProduceResponsePartitionParserV9TailCall = "uprobe__kafka_tls_produce_response_partition_parser_v9" tlsTerminationTailCall = "uprobe__kafka_tls_termination" tlsDispatcherTailCall = "uprobe__tls_protocol_dispatcher_kafka" @@ -110,44 +110,44 @@ var Spec = &protocols.ProtocolSpec{ }, { ProgArrayName: protocols.ProtocolDispatcherProgramsMap, - Key: uint32(protocols.ProgramKafkaResponsePartitionParserV0), + Key: uint32(protocols.ProgramKafkaFetchResponsePartitionParserV0), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: responsePartitionParserV0TailCall, + EBPFFuncName: fetchResponsePartitionParserV0TailCall, }, }, { ProgArrayName: 
protocols.ProtocolDispatcherProgramsMap, - Key: uint32(protocols.ProgramKafkaResponsePartitionParserV12), + Key: uint32(protocols.ProgramKafkaFetchResponsePartitionParserV12), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: responsePartitionParserV12TailCall, + EBPFFuncName: fetchResponsePartitionParserV12TailCall, }, }, { ProgArrayName: protocols.ProtocolDispatcherProgramsMap, - Key: uint32(protocols.ProgramKafkaResponseRecordBatchParserV0), + Key: uint32(protocols.ProgramKafkaFetchResponseRecordBatchParserV0), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: responseRecordBatchParserV0TailCall, + EBPFFuncName: fetchResponseRecordBatchParserV0TailCall, }, }, { ProgArrayName: protocols.ProtocolDispatcherProgramsMap, - Key: uint32(protocols.ProgramKafkaResponseRecordBatchParserV12), + Key: uint32(protocols.ProgramKafkaFetchResponseRecordBatchParserV12), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: responseRecordBatchParserV12TailCall, + EBPFFuncName: fetchResponseRecordBatchParserV12TailCall, }, }, { ProgArrayName: protocols.ProtocolDispatcherProgramsMap, Key: uint32(protocols.ProgramKafkaProduceResponsePartitionParserV0), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: ProduceResponsePartitionParserV0TailCall, + EBPFFuncName: produceResponsePartitionParserV0TailCall, }, }, { ProgArrayName: protocols.ProtocolDispatcherProgramsMap, Key: uint32(protocols.ProgramKafkaProduceResponsePartitionParserV9), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: ProduceResponsePartitionParserV9TailCall, + EBPFFuncName: produceResponsePartitionParserV9TailCall, }, }, { @@ -166,30 +166,30 @@ var Spec = &protocols.ProtocolSpec{ }, { ProgArrayName: protocols.TLSDispatcherProgramsMap, - Key: uint32(protocols.ProgramKafkaResponsePartitionParserV0), + Key: uint32(protocols.ProgramKafkaFetchResponsePartitionParserV0), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: tlsResponsePartitionParserV0TailCall, + EBPFFuncName: tlsFetchResponsePartitionParserV0TailCall, }, }, { ProgArrayName: protocols.TLSDispatcherProgramsMap, - Key: uint32(protocols.ProgramKafkaResponsePartitionParserV12), + Key: uint32(protocols.ProgramKafkaFetchResponsePartitionParserV12), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: tlsResponsePartitionParserV12TailCall, + EBPFFuncName: tlsFetchResponsePartitionParserV12TailCall, }, }, { ProgArrayName: protocols.TLSDispatcherProgramsMap, - Key: uint32(protocols.ProgramKafkaResponseRecordBatchParserV0), + Key: uint32(protocols.ProgramKafkaFetchResponseRecordBatchParserV0), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: tlsResponseRecordBatchParserV0TailCall, + EBPFFuncName: tlsFetchResponseRecordBatchParserV0TailCall, }, }, { ProgArrayName: protocols.TLSDispatcherProgramsMap, - Key: uint32(protocols.ProgramKafkaResponseRecordBatchParserV12), + Key: uint32(protocols.ProgramKafkaFetchResponseRecordBatchParserV12), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: tlsResponseRecordBatchParserV12TailCall, + EBPFFuncName: tlsFetchResponseRecordBatchParserV12TailCall, }, }, { diff --git a/pkg/network/protocols/kafka/statkeeper_test.go b/pkg/network/protocols/kafka/statkeeper_test.go index dc99ff01902d3..21170f2ec8d92 100644 --- a/pkg/network/protocols/kafka/statkeeper_test.go +++ b/pkg/network/protocols/kafka/statkeeper_test.go @@ -135,9 +135,9 @@ func 
generateKafkaTransaction(source util.Address, dest util.Address, sourcePort event.Transaction.Records_count = recordsCount event.Transaction.Topic_name_size = uint8(len(topicName)) event.Transaction.Topic_name = topicNameFromString([]byte(topicName)) - event.Tup.Saddr_l = uint64(binary.LittleEndian.Uint32(source.Bytes())) + event.Tup.Saddr_l = uint64(binary.LittleEndian.Uint32(source.Unmap().AsSlice())) event.Tup.Sport = uint16(sourcePort) - event.Tup.Daddr_l = uint64(binary.LittleEndian.Uint32(dest.Bytes())) + event.Tup.Daddr_l = uint64(binary.LittleEndian.Uint32(dest.Unmap().AsSlice())) event.Tup.Dport = uint16(destPort) event.Tup.Metadata = 1 diff --git a/pkg/network/protocols/postgres/debugging/debugging.go b/pkg/network/protocols/postgres/debugging/debugging.go index 0522a2b2a7c23..0e98b7bdf824f 100644 --- a/pkg/network/protocols/postgres/debugging/debugging.go +++ b/pkg/network/protocols/postgres/debugging/debugging.go @@ -20,11 +20,11 @@ type address struct { Port uint16 } -// key represents a (client, server, table name) tuple. +// key represents a (client, server, parameters: table name or runtime parameter) tuple. type key struct { - Client address - Server address - TableName string + Client address + Server address + Parameters string } // Stats consolidates request count and latency information for a certain status code @@ -58,7 +58,7 @@ func Postgres(stats map[postgres.Key]*postgres.RequestStat) []RequestSummary { IP: serverAddr.String(), Port: k.DstPort, }, - TableName: k.TableName, + Parameters: k.Parameters, } if _, ok := resMap[tempKey]; !ok { resMap[tempKey] = make(map[string]Stats) diff --git a/pkg/network/protocols/postgres/model_linux.go b/pkg/network/protocols/postgres/model_linux.go index 0221dbe144f6b..45881f761204d 100644 --- a/pkg/network/protocols/postgres/model_linux.go +++ b/pkg/network/protocols/postgres/model_linux.go @@ -21,16 +21,21 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) +const ( + // EmptyParameters represents the case where the non-empty query has no parameters + EmptyParameters = "EMPTY_PARAMETERS" +) + // EventWrapper wraps an ebpf event and provides additional methods to extract information from it. // We use this wrapper to avoid recomputing the same values (operation and table name) multiple times. type EventWrapper struct { *ebpf.EbpfEvent - operationSet bool - operation Operation - tableNameSet bool - tableName string - normalizer *sqllexer.Normalizer + operationSet bool + operation Operation + parametersSet bool + parameters string + normalizer *sqllexer.Normalizer } // NewEventWrapper creates a new EventWrapper from an ebpf event. @@ -73,6 +78,25 @@ func (e *EventWrapper) Operation() Operation { return e.operation } +// extractParameters returns the string following the command +func (e *EventWrapper) extractParameters() string { + b := getFragment(&e.Tx) + idxParam := bytes.IndexByte(b, ' ') // trim the string to a space, it will give the parameter + if idxParam == -1 { + return EmptyParameters + } + idxParam++ + + idxEnd := bytes.IndexByte(b[idxParam:], '\x00') // trim trailing nulls + if idxEnd == 0 { + return EmptyParameters + } + if idxEnd != -1 { + return string(b[idxParam : idxParam+idxEnd]) + } + return string(b[idxParam:]) +} + var re = regexp.MustCompile(`(?i)if\s+exists`) // extractTableName extracts the table name from the query. @@ -97,14 +121,18 @@ func (e *EventWrapper) extractTableName() string { } -// TableName returns the name of the table the query is operating on. 
-func (e *EventWrapper) TableName() string { - if !e.tableNameSet { - e.tableName = e.extractTableName() - e.tableNameSet = true +// Parameters returns the table name or run-time parameter. +func (e *EventWrapper) Parameters() string { + if !e.parametersSet { + if e.operation == ShowOP { + e.parameters = e.extractParameters() + } else { + e.parameters = e.extractTableName() + } + e.parametersSet = true } - return e.tableName + return e.parameters } // RequestLatency returns the latency of the request in nanoseconds @@ -125,6 +153,6 @@ ebpfTx{ // String returns a string representation of the underlying event func (e *EventWrapper) String() string { var output strings.Builder - output.WriteString(fmt.Sprintf(template, e.Operation(), e.TableName(), e.RequestLatency())) + output.WriteString(fmt.Sprintf(template, e.Operation(), e.Parameters(), e.RequestLatency())) return output.String() } diff --git a/pkg/network/protocols/postgres/model_linux_test.go b/pkg/network/protocols/postgres/model_linux_test.go index f3888f9945c7e..da81378a4928a 100644 --- a/pkg/network/protocols/postgres/model_linux_test.go +++ b/pkg/network/protocols/postgres/model_linux_test.go @@ -77,7 +77,7 @@ func BenchmarkExtractTableName(b *testing.B) { func requestFragment(fragment []byte) [ebpf.BufferSize]byte { if len(fragment) >= ebpf.BufferSize { - return *(*[ebpf.BufferSize]byte)(fragment) + return [ebpf.BufferSize]byte(fragment) } var b [ebpf.BufferSize]byte copy(b[:], fragment) diff --git a/pkg/network/protocols/postgres/operations.go b/pkg/network/protocols/postgres/operations.go index 49ed0349bee3c..41bc164fd2e20 100644 --- a/pkg/network/protocols/postgres/operations.go +++ b/pkg/network/protocols/postgres/operations.go @@ -29,6 +29,8 @@ const ( AlterTableOP // TruncateTableOP represents a TRUNCATE operation. TruncateTableOP + // ShowOP represents a command SHOW + ShowOP ) // String returns the string representation of the operation. 
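Note (illustrative, not part of the patch): with SHOW support, Key.Parameters can now carry either a table name or a run-time parameter, and extractParameters above derives the latter by taking the text after the first space in the captured query fragment and cutting it at the first NUL. The hedged sketch below restates that trimming logic standalone so its behavior is easy to check; the sample fragments are hypothetical and the helper name parametersOf is not from the patch.

// show_parameters_sketch.go — illustrative sketch only; inputs are hypothetical.
package main

import (
	"bytes"
	"fmt"
)

// emptyParameters mirrors the EmptyParameters constant introduced above.
const emptyParameters = "EMPTY_PARAMETERS"

// parametersOf restates the trimming in extractParameters: keep the text after
// the first space and cut it at the first NUL byte of the captured fragment.
func parametersOf(fragment []byte) string {
	idx := bytes.IndexByte(fragment, ' ')
	if idx == -1 {
		// A bare command such as "SHOW" carries no parameter.
		return emptyParameters
	}
	rest := fragment[idx+1:]
	end := bytes.IndexByte(rest, 0)
	switch {
	case end == 0:
		return emptyParameters
	case end != -1:
		return string(rest[:end])
	default:
		return string(rest)
	}
}

func main() {
	fmt.Println(parametersOf([]byte("SHOW search_path\x00\x00"))) // search_path
	fmt.Println(parametersOf([]byte("SHOW")))                     // EMPTY_PARAMETERS
	fmt.Println(parametersOf([]byte("SHOW \x00")))                // EMPTY_PARAMETERS
}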
@@ -50,6 +52,8 @@ func (op Operation) String() string { return "DELETE" case AlterTableOP: return "ALTER" + case ShowOP: + return "SHOW" default: return "UNKNOWN" } @@ -74,6 +78,8 @@ func FromString(op string) Operation { return DeleteTableOP case "ALTER": return AlterTableOP + case "SHOW": + return ShowOP default: return UnknownOP } diff --git a/pkg/network/protocols/postgres/stats_common.go b/pkg/network/protocols/postgres/stats_common.go index 073f0466b764a..07f39b1dbaccc 100644 --- a/pkg/network/protocols/postgres/stats_common.go +++ b/pkg/network/protocols/postgres/stats_common.go @@ -18,17 +18,17 @@ import ( // Key is an identifier for a group of Postgres transactions type Key struct { - Operation Operation - TableName string + Operation Operation + Parameters string types.ConnectionKey } // NewKey creates a new postgres key -func NewKey(saddr, daddr util.Address, sport, dport uint16, operation Operation, tableName string) Key { +func NewKey(saddr, daddr util.Address, sport, dport uint16, operation Operation, parameters string) Key { return Key{ ConnectionKey: types.NewConnectionKey(saddr, daddr, sport, dport), Operation: operation, - TableName: tableName, + Parameters: parameters, } } diff --git a/pkg/network/protocols/postgres/statskeeper.go b/pkg/network/protocols/postgres/statskeeper.go index 5b35e3dfe910b..069b18c660d75 100644 --- a/pkg/network/protocols/postgres/statskeeper.go +++ b/pkg/network/protocols/postgres/statskeeper.go @@ -37,7 +37,7 @@ func (s *StatKeeper) Process(tx *EventWrapper) { key := Key{ Operation: tx.Operation(), - TableName: tx.TableName(), + Parameters: tx.Parameters(), ConnectionKey: tx.ConnTuple(), } requestStats, ok := s.stats[key] diff --git a/pkg/network/protocols/postgres/statskeeper_test.go b/pkg/network/protocols/postgres/statskeeper_test.go index 0acf4dc862f36..7510e2edeccf9 100644 --- a/pkg/network/protocols/postgres/statskeeper_test.go +++ b/pkg/network/protocols/postgres/statskeeper_test.go @@ -28,16 +28,16 @@ func TestStatKeeperProcess(t *testing.T) { Response_last_seen: 10, }, }, - operationSet: true, - operation: SelectOP, - tableNameSet: true, - tableName: "dummy", + operationSet: true, + operation: SelectOP, + parametersSet: true, + parameters: "dummy", }) } require.Equal(t, 1, len(s.stats)) for k, stat := range s.stats { - require.Equal(t, "dummy", k.TableName) + require.Equal(t, "dummy", k.Parameters) require.Equal(t, SelectOP, k.Operation) require.Equal(t, 20, stat.Count) require.Equal(t, float64(20), stat.Latencies.GetCount()) diff --git a/pkg/network/protocols/postgres/telemetry.go b/pkg/network/protocols/postgres/telemetry.go index bfdd93030c078..349c8cee77c05 100644 --- a/pkg/network/protocols/postgres/telemetry.go +++ b/pkg/network/protocols/postgres/telemetry.go @@ -22,18 +22,81 @@ const ( numberOfBucketsSmallerThanMaxBufferSize = 3 ) +type counterStateEnum int + +const ( + tableAndOperation counterStateEnum = iota + 1 + operationNotFound + tableNameNotFound + tableAndOpNotFound +) + +// extractionFailureCounter stores counter when goal was achieved and counter when target not found. +type extractionFailureCounter struct { + // countTableAndOperationFound counts the number of successfully retrieved table name and operation. + countTableAndOperationFound *libtelemetry.Counter + // countOperationNotFound counts the number of unsuccessful fetches of the operation. + countOperationNotFound *libtelemetry.Counter + // countTableNameNotFound counts the number of unsuccessful fetches of the table name. 
+ countTableNameNotFound *libtelemetry.Counter + // countTableAndOpNotFound counts the number of failed attempts to fetch both the table name and the operation. + countTableAndOpNotFound *libtelemetry.Counter +} + +// newExtractionFailureCounter creates and returns a new instance +func newExtractionFailureCounter(metricGroup *libtelemetry.MetricGroup, metricName string, tags ...string) *extractionFailureCounter { + return &extractionFailureCounter{ + countTableAndOperationFound: metricGroup.NewCounter(metricName, append(tags, "state:table_and_op")...), + countOperationNotFound: metricGroup.NewCounter(metricName, append(tags, "state:no_operation")...), + countTableNameNotFound: metricGroup.NewCounter(metricName, append(tags, "state:no_table_name")...), + countTableAndOpNotFound: metricGroup.NewCounter(metricName, append(tags, "state:no_table_no_op")...), + } +} + +// inc increments the appropriate counter based on the provided state. +func (c *extractionFailureCounter) inc(state counterStateEnum) { + switch state { + case tableAndOperation: + c.countTableAndOperationFound.Add(1) + case operationNotFound: + c.countOperationNotFound.Add(1) + case tableNameNotFound: + c.countTableNameNotFound.Add(1) + case tableAndOpNotFound: + c.countTableAndOpNotFound.Add(1) + default: + log.Errorf("unable to increment extractionFailureCounter due to undefined state: %v\n", state) + } +} + +// get returns the counter value based on the result. +func (c *extractionFailureCounter) get(state counterStateEnum) int64 { + switch state { + case tableAndOperation: + return c.countTableAndOperationFound.Get() + case operationNotFound: + return c.countOperationNotFound.Get() + case tableNameNotFound: + return c.countTableNameNotFound.Get() + case tableAndOpNotFound: + return c.countTableAndOpNotFound.Get() + default: + return 0 + } +} + // Telemetry is a struct to hold the telemetry for the postgres protocol type Telemetry struct { metricGroup *libtelemetry.MetricGroup // queryLengthBuckets holds the counters for the different buckets of by the query length quires - queryLengthBuckets [numberOfBuckets]*libtelemetry.Counter + queryLengthBuckets [numberOfBuckets]*extractionFailureCounter // failedTableNameExtraction holds the counter for the failed table name extraction failedTableNameExtraction *libtelemetry.Counter // failedOperationExtraction holds the counter for the failed operation extraction failedOperationExtraction *libtelemetry.Counter // firstBucketLowerBoundary is the lower boundary of the first bucket. - // We add 1 in order to include BufferSize as the upper boundary of the third bucket. + // We inc 1 in order to include BufferSize as the upper boundary of the third bucket. // Then the first three buckets will include query lengths shorter or equal to BufferSize, // and the rest will include sizes equal to or above the buffer size. 
firstBucketLowerBoundary int @@ -51,10 +114,10 @@ type Telemetry struct { // Bucket 7: BufferSize + 4*bucketLength + 1 to BufferSize + 5*bucketLength // Bucket 8: BufferSize + 5*bucketLength + 1 to BufferSize + 6*bucketLength // Bucket 9: BufferSize + 6*bucketLength + 1 to BufferSize + 7*bucketLength -func createQueryLengthBuckets(metricGroup *libtelemetry.MetricGroup) [numberOfBuckets]*libtelemetry.Counter { - var buckets [numberOfBuckets]*libtelemetry.Counter +func createQueryLengthBuckets(metricGroup *libtelemetry.MetricGroup) [numberOfBuckets]*extractionFailureCounter { + var buckets [numberOfBuckets]*extractionFailureCounter for i := 0; i < numberOfBuckets; i++ { - buckets[i] = metricGroup.NewCounter("query_length_bucket"+fmt.Sprint(i+1), libtelemetry.OptStatsd) + buckets[i] = newExtractionFailureCounter(metricGroup, "query_length_bucket"+fmt.Sprint(i+1), libtelemetry.OptStatsd) } return buckets } @@ -88,16 +151,22 @@ func (t *Telemetry) getBucketIndex(querySize int) int { func (t *Telemetry) Count(tx *ebpf.EbpfEvent, eventWrapper *EventWrapper) { querySize := int(tx.Tx.Original_query_size) - bucketIndex := t.getBucketIndex(querySize) - if bucketIndex >= 0 && bucketIndex < len(t.queryLengthBuckets) { - t.queryLengthBuckets[bucketIndex].Add(1) - } - + state := tableAndOperation if eventWrapper.Operation() == UnknownOP { t.failedOperationExtraction.Add(1) + state = operationNotFound } - if eventWrapper.TableName() == "UNKNOWN" { + if eventWrapper.Parameters() == "UNKNOWN" { t.failedTableNameExtraction.Add(1) + if state == operationNotFound { + state = tableAndOpNotFound + } else { + state = tableNameNotFound + } + } + bucketIndex := t.getBucketIndex(querySize) + if bucketIndex >= 0 && bucketIndex < len(t.queryLengthBuckets) { + t.queryLengthBuckets[bucketIndex].inc(state) } } diff --git a/pkg/network/protocols/postgres/telemetry_test.go b/pkg/network/protocols/postgres/telemetry_test.go index f1e5f9cf58a04..aa8aa6843e373 100644 --- a/pkg/network/protocols/postgres/telemetry_test.go +++ b/pkg/network/protocols/postgres/telemetry_test.go @@ -22,6 +22,7 @@ type telemetryResults struct { queryLength [bucketLength]int64 failedTableNameExtraction int64 failedOperationExtraction int64 + counterState counterStateEnum } func Test_getBucketIndex(t *testing.T) { @@ -81,6 +82,7 @@ func TestTelemetry_Count(t *testing.T) { queryLength: [bucketLength]int64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, failedOperationExtraction: 10, failedTableNameExtraction: 10, + counterState: tableAndOpNotFound, }, }, { @@ -103,6 +105,7 @@ func TestTelemetry_Count(t *testing.T) { queryLength: [bucketLength]int64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, failedOperationExtraction: 10, failedTableNameExtraction: 10, + counterState: tableAndOpNotFound, }, }, { @@ -125,6 +128,7 @@ func TestTelemetry_Count(t *testing.T) { queryLength: [bucketLength]int64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, failedOperationExtraction: 10, failedTableNameExtraction: 10, + counterState: tableAndOpNotFound, }, }, { @@ -134,6 +138,7 @@ func TestTelemetry_Count(t *testing.T) { expectedTelemetry: telemetryResults{ failedOperationExtraction: 1, queryLength: [bucketLength]int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + counterState: operationNotFound, }, }, { @@ -143,6 +148,7 @@ func TestTelemetry_Count(t *testing.T) { expectedTelemetry: telemetryResults{ failedTableNameExtraction: 1, queryLength: [bucketLength]int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + counterState: tableNameNotFound, }, }, { @@ -153,6 +159,7 @@ func TestTelemetry_Count(t *testing.T) { failedTableNameExtraction: 1, 
failedOperationExtraction: 1, queryLength: [bucketLength]int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + counterState: tableAndOpNotFound, }, }, } @@ -188,7 +195,15 @@ func createEbpfEvent(querySize int) *ebpf.EbpfEvent { func verifyTelemetry(t *testing.T, tel *Telemetry, expected telemetryResults) { for i := 0; i < len(tel.queryLengthBuckets); i++ { - assert.Equal(t, expected.queryLength[i], tel.queryLengthBuckets[i].Get(), "queryLength for bucket %d count is incorrect", i) + expState := expected.counterState + expCount := expected.queryLength[i] + curCount := tel.queryLengthBuckets[i].get(expState) + + assert.Equal(t, + expCount, + curCount, + "queryLength bucket '%d': expected state '%v', expected counter '%d', actual counter '%d'", + i, expState, expCount, curCount) } assert.Equal(t, expected.failedTableNameExtraction, tel.failedTableNameExtraction.Get(), "failedTableNameExtraction count is incorrect") assert.Equal(t, expected.failedOperationExtraction, tel.failedOperationExtraction.Get(), "failedOperationExtraction count is incorrect") diff --git a/pkg/network/protocols/tls/gotls/testutil/helpers.go b/pkg/network/protocols/tls/gotls/testutil/helpers.go index fcf418ae505a9..2315779aa184a 100644 --- a/pkg/network/protocols/tls/gotls/testutil/helpers.go +++ b/pkg/network/protocols/tls/gotls/testutil/helpers.go @@ -8,11 +8,34 @@ package testutil import ( + "slices" + "testing" + + "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/network/config" usmconfig "github.com/DataDog/datadog-agent/pkg/network/usm/config" + "github.com/DataDog/datadog-agent/pkg/util/kernel" +) + +const ( + fedoraPlatform = "fedora" ) +var fedoraUnsupportedVersions = []string{"35", "36", "37", "38"} + +// isFedora returns true if the current OS is Fedora. +// go-tls does not work correctly on Fedora 35, 36, 37 and 38. +func isFedora(t *testing.T) bool { + platform, err := kernel.Platform() + require.NoError(t, err) + platformVersion, err := kernel.PlatformVersion() + require.NoError(t, err) + + return platform == fedoraPlatform && slices.Contains(fedoraUnsupportedVersions, platformVersion) +} + // GoTLSSupported returns true if GO-TLS monitoring is supported on the current OS. 
-func GoTLSSupported(cfg *config.Config) bool { - return usmconfig.TLSSupported(cfg) && (cfg.EnableRuntimeCompiler || cfg.EnableCORE) +func GoTLSSupported(t *testing.T, cfg *config.Config) bool { + return usmconfig.TLSSupported(cfg) && (cfg.EnableRuntimeCompiler || cfg.EnableCORE) && !isFedora(t) } diff --git a/pkg/network/protocols/tls/nodejs/testdata/package.json b/pkg/network/protocols/tls/nodejs/testdata/package.json index d544e62306e42..18ab31594327b 100644 --- a/pkg/network/protocols/tls/nodejs/testdata/package.json +++ b/pkg/network/protocols/tls/nodejs/testdata/package.json @@ -1,5 +1,5 @@ { - "name": "nodejs-https-server", + "name": "test@nodejs-https-server", "dependencies": { "dd-trace": "^5.21.0" } diff --git a/pkg/network/route_cache.go b/pkg/network/route_cache.go index 3537a4c2678f5..44a675b0f649b 100644 --- a/pkg/network/route_cache.go +++ b/pkg/network/route_cache.go @@ -169,10 +169,10 @@ func (c *routeCache) Get(source, dest util.Address, netns uint32) (Route, bool) func newRouteKey(source, dest util.Address, netns uint32) routeKey { k := routeKey{netns: netns, source: source, dest: dest} - switch dest.Len() { - case 4: + switch { + case dest.Is4(): k.connFamily = AFINET - case 16: + case dest.Is6(): k.connFamily = AFINET6 } return k diff --git a/pkg/network/state_test.go b/pkg/network/state_test.go index 41d29919ae6f6..d4015c6a972ca 100644 --- a/pkg/network/state_test.go +++ b/pkg/network/state_test.go @@ -22,7 +22,7 @@ import ( "go.uber.org/atomic" "go4.org/intern" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/network/dns" "github.com/DataDog/datadog-agent/pkg/network/protocols" "github.com/DataDog/datadog-agent/pkg/network/protocols/http" @@ -2859,8 +2859,8 @@ func TestDNSPIDCollision(t *testing.T) { }, } - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", true) - config.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", false) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", true) + pkgconfigsetup.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", false) state := newDefaultState() state.RegisterClient("foo") diff --git a/pkg/network/tracer/cached_conntrack.go b/pkg/network/tracer/cached_conntrack.go index 51c691423f840..de4f6cec213f1 100644 --- a/pkg/network/tracer/cached_conntrack.go +++ b/pkg/network/tracer/cached_conntrack.go @@ -10,7 +10,6 @@ package tracer import ( "errors" "fmt" - "net" "net/netip" "os" "sync" @@ -21,7 +20,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/netlink" - "github.com/DataDog/datadog-agent/pkg/process/util" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -65,13 +63,6 @@ func (cache *cachedConntrack) Exists(c *network.ConnectionStats) (bool, error) { return cache.exists(c, c.NetNS, int(c.Pid)) } -func ipFromAddr(a util.Address) netip.Addr { - if a.Len() == net.IPv6len { - return netip.AddrFrom16(*(*[16]byte)(a.Bytes())) - } - return netip.AddrFrom4(*(*[4]byte)(a.Bytes())) -} - func (cache *cachedConntrack) exists(c *network.ConnectionStats, netns uint32, pid int) (bool, error) { ctrk, err := cache.ensureConntrack(uint64(netns), pid) if err != nil { @@ -89,8 +80,8 @@ func (cache *cachedConntrack) exists(c *network.ConnectionStats, netns uint32, p conn := netlink.Con{ Origin: netlink.ConTuple{ - Src: 
netip.AddrPortFrom(ipFromAddr(c.Source), c.SPort), - Dst: netip.AddrPortFrom(ipFromAddr(c.Dest), c.DPort), + Src: netip.AddrPortFrom(c.Source.Unmap(), c.SPort), + Dst: netip.AddrPortFrom(c.Dest.Unmap(), c.DPort), Proto: protoNumber, }, } diff --git a/pkg/network/tracer/connection/dump.go b/pkg/network/tracer/connection/dump.go index 101ab0d084cb6..a9a9b1aff1337 100644 --- a/pkg/network/tracer/connection/dump.go +++ b/pkg/network/tracer/connection/dump.go @@ -23,7 +23,7 @@ import ( ) -func dumpMapsHandler(w io.Writer, manager *manager.Manager, mapName string, currentMap *ebpf.Map) { +func dumpMapsHandler(w io.Writer, _ *manager.Manager, mapName string, currentMap *ebpf.Map) { switch mapName { case "connectsock_ipv6": // maps/connectsock_ipv6 (BPF_MAP_TYPE_HASH), key C.__u64, value uintptr // C.void* @@ -89,6 +89,21 @@ func dumpMapsHandler(w io.Writer, manager *manager.Manager, mapName string, curr spew.Fdump(w, key, value) } + case probes.TCPOngoingConnectPid: // maps/tcp_ongoing_connect_pid (BPF_MAP_TYPE_HASH), key SkpConnTuple, value u64 + io.WriteString(w, "Map: '"+mapName+"', key: 'SkpConnTuple', value: 'C.u64'\n") + io.WriteString(w, "This map is used to store the PID of the process that initiated the connection\n") + totalSize := 0 + info, _ := currentMap.Info() + spew.Fdump(w, info) + iter := currentMap.Iterate() + var key ddebpf.SkpConn + var value ddebpf.PidTs + for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { + totalSize++ + spew.Fdump(w, key.Tup, value) + } + io.WriteString(w, "Total entries: "+spew.Sdump(totalSize)) + case probes.ConnCloseBatchMap: // maps/conn_close_batch (BPF_MAP_TYPE_HASH), key C.__u32, value batch io.WriteString(w, "Map: '"+mapName+"', key: 'C.__u32', value: 'batch'\n") iter := currentMap.Iterate() diff --git a/pkg/network/tracer/connection/ebpf_tracer.go b/pkg/network/tracer/connection/ebpf_tracer.go index e46d8dd242161..4792e6f2aca5a 100644 --- a/pkg/network/tracer/connection/ebpf_tracer.go +++ b/pkg/network/tracer/connection/ebpf_tracer.go @@ -46,42 +46,53 @@ const ( connTracerModuleName = "network_tracer__ebpf" ) +var tcpOngoingConnectMapTTL = 30 * time.Minute.Nanoseconds() + var EbpfTracerTelemetry = struct { connections telemetry.Gauge tcpFailedConnects *prometheus.Desc - TcpSentMiscounts *prometheus.Desc + TcpSentMiscounts *prometheus.Desc unbatchedTcpClose *prometheus.Desc unbatchedUdpClose *prometheus.Desc UdpSendsProcessed *prometheus.Desc - UdpSendsMissed *prometheus.Desc - UdpDroppedConns *prometheus.Desc + UdpSendsMissed *prometheus.Desc + UdpDroppedConns *prometheus.Desc // doubleFlushAttemptsClose is a counter measuring the number of attempts to flush a closed connection twice from tcp_close doubleFlushAttemptsClose *prometheus.Desc // doubleFlushAttemptsDone is a counter measuring the number of attempts to flush a closed connection twice from tcp_done doubleFlushAttemptsDone *prometheus.Desc // unsupportedTcpFailures is a counter measuring the number of attempts to flush a TCP failure that is not supported unsupportedTcpFailures *prometheus.Desc - // tcpDonePidMismatch is a counter measuring the number of TCP connections with a PID mismatch between tcp_connect and tcp_done - tcpDonePidMismatch *prometheus.Desc - PidCollisions *telemetry.StatCounterWrapper - iterationDups telemetry.Counter - iterationAborts telemetry.Counter + // tcpDoneMissingPid is a counter measuring the number of TCP connections with a PID mismatch between tcp_connect and tcp_done + tcpDoneMissingPid *prometheus.Desc + tcpConnectFailedTuple *prometheus.Desc + 
tcpDoneFailedTuple *prometheus.Desc + tcpFinishConnectFailedTuple *prometheus.Desc + tcpCloseTargetFailures *prometheus.Desc + ongoingConnectPidCleaned telemetry.Counter + PidCollisions *telemetry.StatCounterWrapper + iterationDups telemetry.Counter + iterationAborts telemetry.Counter lastTcpFailedConnects *atomic.Int64 - LastTcpSentMiscounts *atomic.Int64 + LastTcpSentMiscounts *atomic.Int64 lastUnbatchedTcpClose *atomic.Int64 lastUnbatchedUdpClose *atomic.Int64 lastUdpSendsProcessed *atomic.Int64 - lastUdpSendsMissed *atomic.Int64 - lastUdpDroppedConns *atomic.Int64 + lastUdpSendsMissed *atomic.Int64 + lastUdpDroppedConns *atomic.Int64 // lastDoubleFlushAttemptsClose is a counter measuring the diff between the last two values of doubleFlushAttemptsClose lastDoubleFlushAttemptsClose *atomic.Int64 // lastDoubleFlushAttemptsDone is a counter measuring the diff between the last two values of doubleFlushAttemptsDone lastDoubleFlushAttemptsDone *atomic.Int64 // lastUnsupportedTcpFailures is a counter measuring the diff between the last two values of unsupportedTcpFailures lastUnsupportedTcpFailures *atomic.Int64 - // lastTcpDonePidMismatch is a counter measuring the diff between the last two values of tcpDonePidMismatch - lastTcpDonePidMismatch *atomic.Int64 + // lastTcpDoneMissingPid is a counter measuring the diff between the last two values of tcpDoneMissingPid + lastTcpDoneMissingPid *atomic.Int64 + lastTcpConnectFailedTuple *atomic.Int64 + lastTcpDoneFailedTuple *atomic.Int64 + lastTcpFinishConnectFailedTuple *atomic.Int64 + lastTcpCloseTargetFailures *atomic.Int64 }{ telemetry.NewGauge(connTracerModuleName, "connections", []string{"ip_proto", "family"}, "Gauge measuring the number of active connections in the EBPF map"), prometheus.NewDesc(connTracerModuleName+"__tcp_failed_connects", "Counter measuring the number of failed TCP connections in the EBPF map", nil, nil), @@ -94,7 +105,12 @@ var EbpfTracerTelemetry = struct { prometheus.NewDesc(connTracerModuleName+"__double_flush_attempts_close", "Counter measuring the number of attempts to flush a closed connection twice from tcp_close", nil, nil), prometheus.NewDesc(connTracerModuleName+"__double_flush_attempts_done", "Counter measuring the number of attempts to flush a closed connection twice from tcp_done", nil, nil), prometheus.NewDesc(connTracerModuleName+"__unsupported_tcp_failures", "Counter measuring the number of attempts to flush a TCP failure that is not supported", nil, nil), - prometheus.NewDesc(connTracerModuleName+"__tcp_done_pid_mismatch", "Counter measuring the number of TCP connections with a PID mismatch between tcp_connect and tcp_done", nil, nil), + prometheus.NewDesc(connTracerModuleName+"__tcp_done_missing_pid", "Counter measuring the number of TCP connections with a missing PID in tcp_done", nil, nil), + prometheus.NewDesc(connTracerModuleName+"__tcp_connect_failed_tuple", "Counter measuring the number of failed TCP connections due to tuple collisions", nil, nil), + prometheus.NewDesc(connTracerModuleName+"__tcp_done_failed_tuple", "Counter measuring the number of failed TCP connections due to tuple collisions", nil, nil), + prometheus.NewDesc(connTracerModuleName+"__tcp_finish_connect_failed_tuple", "Counter measuring the number of failed TCP connections due to tuple collisions", nil, nil), + prometheus.NewDesc(connTracerModuleName+"__tcp_close_target_failures", "Counter measuring the number of failed TCP connections in tcp_close", nil, nil), + telemetry.NewCounter(connTracerModuleName, 
"ongoing_connect_pid_cleaned", []string{}, "Counter measuring the number of tcp_ongoing_connect_pid entries cleaned in userspace"), telemetry.NewStatCounterWrapper(connTracerModuleName, "pid_collisions", []string{}, "Counter measuring number of process collisions"), telemetry.NewCounter(connTracerModuleName, "iteration_dups", []string{}, "Counter measuring the number of connections iterated more than once"), telemetry.NewCounter(connTracerModuleName, "iteration_aborts", []string{}, "Counter measuring how many times ebpf iteration of connection map was aborted"), @@ -109,6 +125,10 @@ var EbpfTracerTelemetry = struct { atomic.NewInt64(0), atomic.NewInt64(0), atomic.NewInt64(0), + atomic.NewInt64(0), + atomic.NewInt64(0), + atomic.NewInt64(0), + atomic.NewInt64(0), } type ebpfTracer struct { @@ -124,6 +144,9 @@ type ebpfTracer struct { // tcp failure events failedConnConsumer *failure.TCPFailedConnConsumer + // periodically clean the ongoing connection pid map + ongoingConnectCleaner *ddebpf.MapCleaner[netebpf.SkpConn, netebpf.PidTs] + removeTuple *netebpf.ConnTuple closeTracer func() @@ -161,6 +184,7 @@ func newEbpfTracer(config *config.Config, _ telemetryComponent.Component) (Trace probes.UDPPortBindingsMap: {MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, probes.ConnectionProtocolMap: {MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, probes.ConnectionTupleToSocketSKBConnMap: {MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, + probes.TCPOngoingConnectPid: {MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, }, ConstantEditors: []manager.ConstantEditor{ boolConst("tcpv6_enabled", config.CollectTCPv6Conns), @@ -244,6 +268,8 @@ func newEbpfTracer(config *config.Config, _ telemetryComponent.Component) (Trace ch: newCookieHasher(), } + tr.setupMapCleaner(m) + tr.conns, err = maps.GetMap[netebpf.ConnTuple, netebpf.ConnStats](m, probes.ConnMap) if err != nil { tr.Stop() @@ -329,6 +355,7 @@ func (t *ebpfTracer) Stop() { _ = t.m.Stop(manager.CleanAll) t.closeConsumer.Stop() t.failedConnConsumer.Stop() + t.ongoingConnectCleaner.Stop() if t.closeTracer != nil { t.closeTracer() } @@ -501,7 +528,11 @@ func (t *ebpfTracer) Describe(ch chan<- *prometheus.Desc) { ch <- EbpfTracerTelemetry.doubleFlushAttemptsClose ch <- EbpfTracerTelemetry.doubleFlushAttemptsDone ch <- EbpfTracerTelemetry.unsupportedTcpFailures - ch <- EbpfTracerTelemetry.tcpDonePidMismatch + ch <- EbpfTracerTelemetry.tcpDoneMissingPid + ch <- EbpfTracerTelemetry.tcpConnectFailedTuple + ch <- EbpfTracerTelemetry.tcpDoneFailedTuple + ch <- EbpfTracerTelemetry.tcpFinishConnectFailedTuple + ch <- EbpfTracerTelemetry.tcpCloseTargetFailures } // Collect returns the current state of all metrics of the collector @@ -550,10 +581,25 @@ func (t *ebpfTracer) Collect(ch chan<- prometheus.Metric) { EbpfTracerTelemetry.lastUnsupportedTcpFailures.Store(int64(ebpfTelemetry.Unsupported_tcp_failures)) ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.unsupportedTcpFailures, prometheus.CounterValue, float64(delta)) - delta = int64(ebpfTelemetry.Tcp_done_pid_mismatch) - EbpfTracerTelemetry.lastTcpDonePidMismatch.Load() - EbpfTracerTelemetry.lastTcpDonePidMismatch.Store(int64(ebpfTelemetry.Tcp_done_pid_mismatch)) - ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpDonePidMismatch, prometheus.CounterValue, float64(delta)) + delta = int64(ebpfTelemetry.Tcp_done_missing_pid) - EbpfTracerTelemetry.lastTcpDoneMissingPid.Load() + 
EbpfTracerTelemetry.lastTcpDoneMissingPid.Store(int64(ebpfTelemetry.Tcp_done_missing_pid)) + ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpDoneMissingPid, prometheus.CounterValue, float64(delta)) + + delta = int64(ebpfTelemetry.Tcp_connect_failed_tuple) - EbpfTracerTelemetry.lastTcpConnectFailedTuple.Load() + EbpfTracerTelemetry.lastTcpConnectFailedTuple.Store(int64(ebpfTelemetry.Tcp_connect_failed_tuple)) + ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpConnectFailedTuple, prometheus.CounterValue, float64(delta)) + + delta = int64(ebpfTelemetry.Tcp_done_failed_tuple) - EbpfTracerTelemetry.lastTcpDoneFailedTuple.Load() + EbpfTracerTelemetry.lastTcpDoneFailedTuple.Store(int64(ebpfTelemetry.Tcp_done_failed_tuple)) + ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpDoneFailedTuple, prometheus.CounterValue, float64(delta)) + delta = int64(ebpfTelemetry.Tcp_finish_connect_failed_tuple) - EbpfTracerTelemetry.lastTcpFinishConnectFailedTuple.Load() + EbpfTracerTelemetry.lastTcpFinishConnectFailedTuple.Store(int64(ebpfTelemetry.Tcp_finish_connect_failed_tuple)) + ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpFinishConnectFailedTuple, prometheus.CounterValue, float64(delta)) + + delta = int64(ebpfTelemetry.Tcp_close_target_failures) - EbpfTracerTelemetry.lastTcpCloseTargetFailures.Load() + EbpfTracerTelemetry.lastTcpCloseTargetFailures.Store(int64(ebpfTelemetry.Tcp_close_target_failures)) + ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpCloseTargetFailures, prometheus.CounterValue, float64(delta)) } // DumpMaps (for debugging purpose) returns all maps content by default or selected maps from maps parameter. @@ -645,6 +691,31 @@ func (t *ebpfTracer) getTCPStats(stats *netebpf.TCPStats, tuple *netebpf.ConnTup return t.tcpStats.Lookup(tuple, stats) == nil } +// setupMapCleaner sets up a map cleaner for the tcp_ongoing_connect_pid map +func (t *ebpfTracer) setupMapCleaner(m *manager.Manager) { + tcpOngoingConnectPidMap, _, err := m.GetMap(probes.TCPOngoingConnectPid) + if err != nil { + log.Errorf("error getting %v map: %s", probes.TCPOngoingConnectPid, err) + return + } + + tcpOngoingConnectPidCleaner, err := ddebpf.NewMapCleaner[netebpf.SkpConn, netebpf.PidTs](tcpOngoingConnectPidMap, 1024) + if err != nil { + log.Errorf("error creating map cleaner: %s", err) + return + } + tcpOngoingConnectPidCleaner.Clean(time.Minute*5, nil, nil, func(now int64, _ netebpf.SkpConn, val netebpf.PidTs) bool { + ts := int64(val.Timestamp) + expired := ts > 0 && now-ts > tcpOngoingConnectMapTTL + if expired { + EbpfTracerTelemetry.ongoingConnectPidCleaned.Inc() + } + return expired + }) + + t.ongoingConnectCleaner = tcpOngoingConnectPidCleaner +} + func populateConnStats(stats *network.ConnectionStats, t *netebpf.ConnTuple, s *netebpf.ConnStats, ch *cookieHasher) { *stats = network.ConnectionStats{ Pid: t.Pid, diff --git a/pkg/network/tracer/connection/ebpfless_tracer.go b/pkg/network/tracer/connection/ebpfless_tracer.go index b2cd928ceddfb..2c467a114aa4b 100644 --- a/pkg/network/tracer/connection/ebpfless_tracer.go +++ b/pkg/network/tracer/connection/ebpfless_tracer.go @@ -20,6 +20,7 @@ import ( "github.com/vishvananda/netns" "golang.org/x/sys/unix" + ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/filter" @@ -213,7 +214,11 @@ func (t *ebpfLessTracer) processConnection( } if conn.Type == network.UDP || 
conn.Monotonic.TCPEstablished > 0 { - conn.LastUpdateEpoch = uint64(time.Now().UnixNano()) + var ts int64 + if ts, err = ddebpf.NowNanoseconds(); err != nil { + return fmt.Errorf("error getting last updated timestamp for connection: %w", err) + } + conn.LastUpdateEpoch = uint64(ts) t.conns[key] = conn } diff --git a/pkg/network/tracer/connection/failure/failed_conn_consumer.go b/pkg/network/tracer/connection/failure/failed_conn_consumer.go index b82dbd5afc76b..4df1a90f29705 100644 --- a/pkg/network/tracer/connection/failure/failed_conn_consumer.go +++ b/pkg/network/tracer/connection/failure/failed_conn_consumer.go @@ -57,7 +57,7 @@ func (c *TCPFailedConnConsumer) Stop() { c.once.Do(func() { close(c.closed) }) - c.FailedConns.mapCleaner.Stop() + c.FailedConns.connCloseFlushedCleaner.Stop() } func (c *TCPFailedConnConsumer) extractConn(data []byte) { diff --git a/pkg/network/tracer/connection/failure/matching.go b/pkg/network/tracer/connection/failure/matching.go index d02f4ddc0ebbb..4ad1c8ff31c1d 100644 --- a/pkg/network/tracer/connection/failure/matching.go +++ b/pkg/network/tracer/connection/failure/matching.go @@ -26,18 +26,20 @@ import ( ) var ( - telemetryModuleName = "network_tracer__tcp_failure" - mapTTL = 10 * time.Millisecond.Nanoseconds() + telemetryModuleName = "network_tracer__tcp_failure" + connClosedFlushMapTTL = 10 * time.Millisecond.Nanoseconds() ) var failureTelemetry = struct { - failedConnMatches telemetry.Counter - failedConnOrphans telemetry.Counter - failedConnsDropped telemetry.Counter + failedConnMatches telemetry.Counter + failedConnOrphans telemetry.Counter + failedConnsDropped telemetry.Counter + closedConnFlushedCleaned telemetry.Counter }{ telemetry.NewCounter(telemetryModuleName, "matches", []string{"type"}, "Counter measuring the number of successful matches of failed connections with closed connections"), telemetry.NewCounter(telemetryModuleName, "orphans", []string{}, "Counter measuring the number of orphans after associating failed connections with a closed connection"), telemetry.NewCounter(telemetryModuleName, "dropped", []string{}, "Counter measuring the number of dropped failed connections"), + telemetry.NewCounter(telemetryModuleName, "closed_conn_flushed_cleaned", []string{}, "Counter measuring the number of conn_close_flushed entries cleaned in userspace"), } // FailedConnStats is a wrapper to help document the purpose of the underlying map @@ -58,10 +60,10 @@ type FailedConnMap map[ebpf.ConnTuple]*FailedConnStats // FailedConns is a struct to hold failed connections type FailedConns struct { - FailedConnMap map[ebpf.ConnTuple]*FailedConnStats - maxFailuresBuffered uint32 - failureTuple *ebpf.ConnTuple - mapCleaner *ddebpf.MapCleaner[ebpf.ConnTuple, int64] + FailedConnMap map[ebpf.ConnTuple]*FailedConnStats + maxFailuresBuffered uint32 + failureTuple *ebpf.ConnTuple + connCloseFlushedCleaner *ddebpf.MapCleaner[ebpf.ConnTuple, int64] sync.Mutex } @@ -159,8 +161,12 @@ func (fc *FailedConns) setupMapCleaner(m *manager.Manager) { } mapCleaner.Clean(time.Second*1, nil, nil, func(now int64, _ ebpf.ConnTuple, val int64) bool { - return val > 0 && now-val > mapTTL + expired := val > 0 && now-val > connClosedFlushMapTTL + if expired { + failureTelemetry.closedConnFlushedCleaned.Inc() + } + return expired }) - fc.mapCleaner = mapCleaner + fc.connCloseFlushedCleaner = mapCleaner } diff --git a/pkg/network/tracer/connection/fentry/manager.go b/pkg/network/tracer/connection/fentry/manager.go index 5bf409712b057..29cb5ac920bfe 100644 --- 
a/pkg/network/tracer/connection/fentry/manager.go +++ b/pkg/network/tracer/connection/fentry/manager.go @@ -20,7 +20,7 @@ func initManager(mgr *ddebpf.Manager, connCloseEventHandler ddebpf.EventHandler, mgr.Maps = []*manager.Map{ {Name: probes.ConnMap}, {Name: probes.TCPStatsMap}, - {Name: probes.TCPConnectSockPidMap}, + {Name: probes.TCPOngoingConnectPid}, {Name: probes.ConnCloseFlushed}, {Name: probes.ConnCloseBatchMap}, {Name: "udp_recv_sock"}, diff --git a/pkg/network/tracer/connection/kprobe/config.go b/pkg/network/tracer/connection/kprobe/config.go index 4f119bdbf7bec..880a2f0a5e838 100644 --- a/pkg/network/tracer/connection/kprobe/config.go +++ b/pkg/network/tracer/connection/kprobe/config.go @@ -14,8 +14,25 @@ import ( "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/ebpf/probes" "github.com/DataDog/datadog-agent/pkg/util/kernel" + "github.com/DataDog/datadog-agent/pkg/util/log" ) +// After kernel 6.5.0, tcp_sendpage and udp_sendpage are removed. +// We used to only check for kv < 6.5.0 here - however, OpenSUSE 15.6 backported +// this change into 6.4.0 to pick up a CVE so the version number is not reliable. +// Instead, we directly check if the function exists. +func hasTCPSendPage(kv kernel.Version) bool { + missing, err := ebpf.VerifyKernelFuncs("tcp_sendpage") + if err == nil { + return len(missing) == 0 + } + + log.Debugf("unable to determine whether tcp_sendpage exists, using kernel version instead: %s", err) + + kv650 := kernel.VersionCode(6, 5, 0) + return kv < kv650 +} + func enableProbe(enabled map[probes.ProbeFuncName]struct{}, name probes.ProbeFuncName) { enabled[name] = struct{}{} } @@ -30,12 +47,14 @@ func enabledProbes(c *config.Config, runtimeTracer, coreTracer bool) (map[probes kv4180 := kernel.VersionCode(4, 18, 0) kv5180 := kernel.VersionCode(5, 18, 0) kv5190 := kernel.VersionCode(5, 19, 0) - kv650 := kernel.VersionCode(6, 5, 0) + kv, err := kernel.HostVersion() if err != nil { return nil, err } + hasSendPage := hasTCPSendPage(kv) + if c.CollectTCPv4Conns || c.CollectTCPv6Conns { if ClassificationSupported(c) { enableProbe(enabled, probes.ProtocolClassifierEntrySocketFilter) @@ -47,7 +66,7 @@ func enabledProbes(c *config.Config, runtimeTracer, coreTracer bool) (map[probes } enableProbe(enabled, selectVersionBasedProbe(runtimeTracer, kv, probes.TCPSendMsg, probes.TCPSendMsgPre410, kv410)) enableProbe(enabled, probes.TCPSendMsgReturn) - if kv < kv650 { + if hasSendPage { enableProbe(enabled, probes.TCPSendPage) enableProbe(enabled, probes.TCPSendPageReturn) } @@ -59,10 +78,8 @@ func enabledProbes(c *config.Config, runtimeTracer, coreTracer bool) (map[probes enableProbe(enabled, probes.TCPClose) enableProbe(enabled, probes.TCPCloseFlushReturn) enableProbe(enabled, probes.TCPConnect) - if c.FailedConnectionsSupported() && (runtimeTracer || coreTracer) { - enableProbe(enabled, probes.TCPDone) - enableProbe(enabled, probes.TCPDoneFlushReturn) - } + enableProbe(enabled, probes.TCPDone) + enableProbe(enabled, probes.TCPDoneFlushReturn) enableProbe(enabled, probes.TCPFinishConnect) enableProbe(enabled, probes.InetCskAcceptReturn) enableProbe(enabled, probes.InetCskListenStop) @@ -81,7 +98,7 @@ func enabledProbes(c *config.Config, runtimeTracer, coreTracer bool) (map[probes enableProbe(enabled, probes.IPMakeSkbReturn) enableProbe(enabled, probes.InetBind) enableProbe(enabled, probes.InetBindRet) - if kv < kv650 { + if hasSendPage { enableProbe(enabled, probes.UDPSendPage) enableProbe(enabled, probes.UDPSendPageReturn) } @@ 
-114,7 +131,7 @@ func enabledProbes(c *config.Config, runtimeTracer, coreTracer bool) (map[probes enableProbe(enabled, probes.IP6MakeSkbReturn) enableProbe(enabled, probes.Inet6Bind) enableProbe(enabled, probes.Inet6BindRet) - if kv < kv650 { + if hasSendPage { enableProbe(enabled, probes.UDPSendPage) enableProbe(enabled, probes.UDPSendPageReturn) } diff --git a/pkg/network/tracer/connection/kprobe/manager.go b/pkg/network/tracer/connection/kprobe/manager.go index 4582c276b01f6..e8c1448e9d11c 100644 --- a/pkg/network/tracer/connection/kprobe/manager.go +++ b/pkg/network/tracer/connection/kprobe/manager.go @@ -65,7 +65,7 @@ func initManager(mgr *ddebpf.Manager, connCloseEventHandler ddebpf.EventHandler, mgr.Maps = []*manager.Map{ {Name: probes.ConnMap}, {Name: probes.TCPStatsMap}, - {Name: probes.TCPConnectSockPidMap}, + {Name: probes.TCPOngoingConnectPid}, {Name: probes.ConnCloseFlushed}, {Name: probes.ConnCloseBatchMap}, {Name: "udp_recv_sock"}, diff --git a/pkg/network/tracer/connection/util/conn_tracer.go b/pkg/network/tracer/connection/util/conn_tracer.go index b97bf1272ae65..724e60ec4c4eb 100644 --- a/pkg/network/tracer/connection/util/conn_tracer.go +++ b/pkg/network/tracer/connection/util/conn_tracer.go @@ -163,10 +163,10 @@ func ConnStatsToTuple(c *network.ConnectionStats, tup *netebpf.ConnTuple) { } else { tup.SetType(netebpf.UDP) } - if !c.Source.IsZero() { + if c.Source.IsValid() { tup.Saddr_l, tup.Saddr_h = util.ToLowHigh(c.Source) } - if !c.Dest.IsZero() { + if c.Dest.IsValid() { tup.Daddr_l, tup.Daddr_h = util.ToLowHigh(c.Dest) } } diff --git a/pkg/network/tracer/tracer.go b/pkg/network/tracer/tracer.go index fded4ce4d70c0..735b26f55db9a 100644 --- a/pkg/network/tracer/tracer.go +++ b/pkg/network/tracer/tracer.go @@ -40,6 +40,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/process/util" timeresolver "github.com/DataDog/datadog-agent/pkg/security/resolvers/time" "github.com/DataDog/datadog-agent/pkg/telemetry" + "github.com/DataDog/datadog-agent/pkg/util/ec2" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -66,14 +67,12 @@ var tracerTelemetry = struct { closedConns *telemetry.StatCounterWrapper connStatsMapSize telemetry.Gauge payloadSizePerClient telemetry.Gauge - failedConnOrphans telemetry.Counter }{ telemetry.NewCounter(tracerModuleName, "skipped_conns", []string{"ip_proto"}, "Counter measuring skipped connections"), telemetry.NewCounter(tracerModuleName, "expired_tcp_conns", []string{}, "Counter measuring expired TCP connections"), telemetry.NewStatCounterWrapper(tracerModuleName, "closed_conns", []string{"ip_proto"}, "Counter measuring closed TCP connections"), telemetry.NewGauge(tracerModuleName, "conn_stats_map_size", []string{}, "Gauge measuring the size of the active connections map"), telemetry.NewGauge(tracerModuleName, "payload_conn_count", []string{"client_id", "ip_proto"}, "Gauge measuring the number of connections in the system-probe payload"), - telemetry.NewCounter(tracerModuleName, "failed_conn_orphans", []string{}, "Counter measuring the number of orphans after associating failed connections with a closed connection"), } // Tracer implements the functionality of the network tracer @@ -850,3 +849,17 @@ func newUSMMonitor(c *config.Config, tracer connection.Tracer) *usm.Monitor { return monitor } + +// GetNetworkID retrieves the vpc_id (network_id) from IMDS +func (t *Tracer) GetNetworkID(context context.Context) (string, error) { + id := "" + err := kernel.WithRootNS(kernel.ProcFSRoot(), func() 
error { + var err error + id, err = ec2.GetNetworkID(context) + return err + }) + if err != nil { + return "", err + } + return id, nil +} diff --git a/pkg/network/tracer/tracer_linux_test.go b/pkg/network/tracer/tracer_linux_test.go index f541f252cf8bf..c0aadb5bbe945 100644 --- a/pkg/network/tracer/tracer_linux_test.go +++ b/pkg/network/tracer/tracer_linux_test.go @@ -40,8 +40,8 @@ import ( "go4.org/intern" "golang.org/x/sys/unix" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" @@ -620,9 +620,9 @@ func (s *TracerSuite) TestGatewayLookupNotEnabled() { m.EXPECT().IsAWS().Return(true) network.Cloud = m - clouds := ddconfig.Datadog().Get("cloud_provider_metadata") - ddconfig.Datadog().SetWithoutSource("cloud_provider_metadata", []string{}) - defer ddconfig.Datadog().SetWithoutSource("cloud_provider_metadata", clouds) + clouds := pkgconfigsetup.Datadog().Get("cloud_provider_metadata") + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{}) + defer pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", clouds) tr := setupTracer(t, cfg) require.Nil(t, tr.gwLookup) @@ -1127,31 +1127,61 @@ func (s *TracerSuite) TestSelfConnect() { }, 5*time.Second, 100*time.Millisecond, "could not find expected number of tcp connections, expected: 2") } -func (s *TracerSuite) TestUDPPeekCount() { - t := s.T() - config := testConfig() - tr := setupTracer(t, config) +// sets up two udp sockets talking to each other locally. +// returns (listener, dialer) +func setupUdpSockets(t *testing.T, udpnet, ip string) (*net.UDPConn, *net.UDPConn) { + serverAddr := fmt.Sprintf("%s:%d", ip, 0) - ln, err := net.ListenPacket("udp", "127.0.0.1:0") + laddr, err := net.ResolveUDPAddr(udpnet, serverAddr) require.NoError(t, err) - defer ln.Close() - saddr := ln.LocalAddr().String() + var ln, c *net.UDPConn = nil, nil + t.Cleanup(func() { + if ln != nil { + ln.Close() + } + if c != nil { + c.Close() + } + }) - laddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0") + ln, err = net.ListenUDP(udpnet, laddr) require.NoError(t, err) - raddr, err := net.ResolveUDPAddr("udp", saddr) + + saddr := ln.LocalAddr().String() + + raddr, err := net.ResolveUDPAddr(udpnet, saddr) require.NoError(t, err) - c, err := net.DialUDP("udp", laddr, raddr) + c, err = net.DialUDP(udpnet, laddr, raddr) require.NoError(t, err) - defer c.Close() + + return ln, c +} + +func (s *TracerSuite) TestUDPPeekCount() { + t := s.T() + t.Run("v4", func(t *testing.T) { + testUDPPeekCount(t, "udp4", "127.0.0.1") + }) + t.Run("v6", func(t *testing.T) { + if !testConfig().CollectUDPv6Conns { + t.Skip("UDPv6 disabled") + } + testUDPPeekCount(t, "udp6", "[::1]") + }) +} +func testUDPPeekCount(t *testing.T, udpnet, ip string) { + config := testConfig() + tr := setupTracer(t, config) + + ln, c := setupUdpSockets(t, udpnet, ip) msg := []byte("asdf") - _, err = c.Write(msg) + _, err := c.Write(msg) require.NoError(t, err) - rawConn, err := ln.(*net.UDPConn).SyscallConn() + rawConn, err := ln.SyscallConn() require.NoError(t, err) err = rawConn.Control(func(fd uintptr) { buf := make([]byte, 1024) @@ -1204,12 +1234,82 @@ func (s *TracerSuite) TestUDPPeekCount() { m := outgoing.Monotonic require.Equal(t, len(msg), int(m.SentBytes)) require.Equal(t, 0, 
int(m.RecvBytes)) + require.Equal(t, 1, int(m.SentPackets)) + require.Equal(t, 0, int(m.RecvPackets)) require.True(t, outgoing.IntraHost) // make sure the inverse values are seen for the other message m = incoming.Monotonic require.Equal(t, 0, int(m.SentBytes)) require.Equal(t, len(msg), int(m.RecvBytes)) + require.Equal(t, 0, int(m.SentPackets)) + require.Equal(t, 1, int(m.RecvPackets)) + require.True(t, incoming.IntraHost) +} + +func (s *TracerSuite) TestUDPPacketSumming() { + t := s.T() + t.Run("v4", func(t *testing.T) { + testUDPPacketSumming(t, "udp4", "127.0.0.1") + }) + t.Run("v6", func(t *testing.T) { + if !testConfig().CollectUDPv6Conns { + t.Skip("UDPv6 disabled") + } + testUDPPacketSumming(t, "udp6", "[::1]") + }) +} +func testUDPPacketSumming(t *testing.T, udpnet, ip string) { + config := testConfig() + tr := setupTracer(t, config) + + ln, c := setupUdpSockets(t, udpnet, ip) + + msg := []byte("asdf") + // send UDP packets of increasing length + for i := range msg { + _, err := c.Write(msg[:i+1]) + require.NoError(t, err) + } + expectedBytes := 1 + 2 + 3 + 4 + + buf := make([]byte, 256) + recvBytes := 0 + for range msg { + n, _, err := ln.ReadFrom(buf) + require.NoError(t, err) + recvBytes += n + } + // sanity check: did userspace get all four expected packets? + require.Equal(t, recvBytes, expectedBytes) + + var incoming *network.ConnectionStats + var outgoing *network.ConnectionStats + require.Eventuallyf(t, func() bool { + conns := getConnections(t, tr) + if outgoing == nil { + outgoing, _ = findConnection(c.LocalAddr(), c.RemoteAddr(), conns) + } + if incoming == nil { + incoming, _ = findConnection(c.RemoteAddr(), c.LocalAddr(), conns) + } + + return outgoing != nil && incoming != nil + }, 3*time.Second, 100*time.Millisecond, "couldn't find incoming and outgoing connections matching") + + m := outgoing.Monotonic + require.Equal(t, expectedBytes, int(m.SentBytes)) + require.Equal(t, 0, int(m.RecvBytes)) + require.Equal(t, int(len(msg)), int(m.SentPackets)) + require.Equal(t, 0, int(m.RecvPackets)) + require.True(t, outgoing.IntraHost) + + // make sure the inverse values are seen for the other message + m = incoming.Monotonic + require.Equal(t, 0, int(m.SentBytes)) + require.Equal(t, expectedBytes, int(m.RecvBytes)) + require.Equal(t, 0, int(m.SentPackets)) + require.Equal(t, int(len(msg)), int(m.RecvPackets)) require.True(t, incoming.IntraHost) } @@ -1507,10 +1607,18 @@ func (s *TracerSuite) TestSendfileRegression() { }, 3*time.Second, 100*time.Millisecond, "couldn't find connections used by sendfile(2)") if assert.NotNil(t, outConn, "couldn't find outgoing connection used by sendfile(2)") { - assert.Equalf(t, int64(clientMessageSize), int64(outConn.Monotonic.SentBytes), "sendfile send data wasn't properly traced") + assert.Equalf(t, int64(clientMessageSize), int64(outConn.Monotonic.SentBytes), "sendfile sent bytes wasn't properly traced") + if connType == network.UDP { + assert.Equalf(t, int64(1), int64(outConn.Monotonic.SentPackets), "sendfile UDP should send exactly 1 packet") + assert.Equalf(t, int64(0), int64(outConn.Monotonic.RecvPackets), "sendfile outConn shouldn't have any RecvPackets") + } } if assert.NotNil(t, inConn, "couldn't find incoming connection used by sendfile(2)") { - assert.Equalf(t, int64(clientMessageSize), int64(inConn.Monotonic.RecvBytes), "sendfile recv data wasn't properly traced") + assert.Equalf(t, int64(clientMessageSize), int64(inConn.Monotonic.RecvBytes), "sendfile recv bytes wasn't properly traced") + if connType == network.UDP { + 
assert.Equalf(t, int64(1), int64(inConn.Monotonic.RecvPackets), "sendfile UDP should recv exactly 1 packet") + assert.Equalf(t, int64(0), int64(inConn.Monotonic.SentPackets), "sendfile inConn shouldn't have any SentPackets") + } } } @@ -1541,7 +1649,7 @@ func (s *TracerSuite) TestSendfileRegression() { t.Skip("UDP will fail with prebuilt tracer") } - // Start TCP server + // Start UDP server var rcvd int64 server := &UDPServer{ network: "udp" + strings.TrimPrefix(family.String(), "v"), @@ -2283,8 +2391,8 @@ func checkSkipFailureConnectionsTests(t *testing.T) { if _, ok := failedConnectionsBuildModes[ebpftest.GetBuildMode()]; !ok { t.Skip("Skipping test on unsupported build mode: ", ebpftest.GetBuildMode()) } - } + func (s *TracerSuite) TestTCPFailureConnectionTimeout() { t := s.T() @@ -2337,11 +2445,76 @@ func (s *TracerSuite) TestTCPFailureConnectionTimeout() { localAddr := fmt.Sprintf("127.0.0.1:%d", port) // Check if the connection was recorded as failed due to timeout + var conn *network.ConnectionStats require.Eventually(t, func() bool { conns := getConnections(t, tr) // 110 is the errno for ETIMEDOUT - return findFailedConnection(t, localAddr, srvAddr, conns, 110) - }, 3*time.Second, 1000*time.Millisecond, "Failed connection not recorded properly") + conn = findFailedConnection(t, localAddr, srvAddr, conns, 110) + return conn != nil + }, 3*time.Second, 100*time.Millisecond, "Failed connection not recorded properly") + + assert.Equal(t, uint32(0), conn.TCPFailures[104], "expected 0 connection reset") + assert.Equal(t, uint32(0), conn.TCPFailures[111], "expected 0 connection refused") + assert.Equal(t, uint32(1), conn.TCPFailures[110], "expected 1 connection timeout") + assert.Equal(t, uint64(0), conn.Monotonic.SentBytes, "expected 0 bytes sent") + assert.Equal(t, uint64(0), conn.Monotonic.RecvBytes, "expected 0 bytes received") +} + +func (s *TracerSuite) TestTCPFailureConnectionResetWithDNAT() { + t := s.T() + + checkSkipFailureConnectionsTests(t) + + cfg := testConfig() + cfg.TCPFailedConnectionsEnabled = true + tr := setupTracer(t, cfg) + + // Setup DNAT to redirect traffic from 2.2.2.2 to 1.1.1.1 + netlinktestutil.SetupDNAT(t) + + // Set up a TCP server on the translated address (1.1.1.1) + srv := tracertestutil.NewTCPServerOnAddress("1.1.1.1:80", func(c net.Conn) { + if tcpConn, ok := c.(*net.TCPConn); ok { + tcpConn.SetLinger(0) + buf := make([]byte, 10) + _, _ = c.Read(buf) + time.Sleep(10 * time.Millisecond) + } + c.Close() + }) + + require.NoError(t, srv.Run(), "error running server") + t.Cleanup(srv.Shutdown) + + // Attempt to connect to the DNAT address (2.2.2.2), which should be redirected to the server at 1.1.1.1 + serverAddr := "2.2.2.2:80" + c, err := net.Dial("tcp", serverAddr) + require.NoError(t, err, "could not connect to server: ", err) + + // Write to the server and expect a reset + _, writeErr := c.Write([]byte("ping")) + if writeErr != nil { + t.Log("Write error:", writeErr) + } + + // Read from server to ensure that the server has a chance to reset the connection + _, readErr := c.Read(make([]byte, 4)) + require.Error(t, readErr, "expected connection reset error but got none") + + // Check if the connection was recorded as reset + var conn *network.ConnectionStats + require.Eventually(t, func() bool { + // 104 is the errno for ECONNRESET + conn = findFailedConnection(t, c.LocalAddr().String(), serverAddr, getConnections(t, tr), 104) + return conn != nil + }, 3*time.Second, 100*time.Millisecond, "Failed connection not recorded properly") + + require.NoError(t, 
c.Close(), "error closing client connection") + assert.Equal(t, uint32(1), conn.TCPFailures[104], "expected 1 connection reset") + assert.Equal(t, uint32(0), conn.TCPFailures[111], "expected 0 connection refused") + assert.Equal(t, uint32(0), conn.TCPFailures[110], "expected 0 connection timeout") + assert.Equal(t, uint64(4), conn.Monotonic.SentBytes, "expected 4 bytes sent") + assert.Equal(t, uint64(0), conn.Monotonic.RecvBytes, "expected 0 bytes received") } func setupDropTrafficRule(tb testing.TB) (ns string) { diff --git a/pkg/network/tracer/tracer_test.go b/pkg/network/tracer/tracer_test.go index 1e001d5b5ed98..d552f29b95bec 100644 --- a/pkg/network/tracer/tracer_test.go +++ b/pkg/network/tracer/tracer_test.go @@ -33,7 +33,6 @@ import ( "github.com/stretchr/testify/suite" "golang.org/x/sync/errgroup" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/network" @@ -77,7 +76,7 @@ func isFentry() bool { func setupTracer(t testing.TB, cfg *config.Config) *Tracer { if isFentry() { - ddconfig.SetFeatures(t, env.ECSFargate) + env.SetFeatures(t, env.ECSFargate) // protocol classification not yet supported on fargate cfg.ProtocolClassificationEnabled = false } @@ -1260,14 +1259,22 @@ func (s *TracerSuite) TestTCPFailureConnectionRefused() { require.Error(t, err, "expected connection refused error but got none") // Check if the connection was recorded as refused + var foundConn *network.ConnectionStats require.Eventually(t, func() bool { conns := getConnections(t, tr) // Check for the refusal record - return findFailedConnectionByRemoteAddr(srvAddr, conns, 111) + foundConn = findFailedConnectionByRemoteAddr(srvAddr, conns, 111) + return foundConn != nil }, 3*time.Second, 100*time.Millisecond, "Failed connection not recorded properly") + + assert.Equal(t, uint32(1), foundConn.TCPFailures[111], "expected 1 connection refused") + assert.Equal(t, uint32(0), foundConn.TCPFailures[104], "expected 0 connection reset") + assert.Equal(t, uint32(0), foundConn.TCPFailures[110], "expected 0 connection timeout") + assert.Equal(t, uint64(0), foundConn.Monotonic.SentBytes, "expected 0 bytes sent") + assert.Equal(t, uint64(0), foundConn.Monotonic.RecvBytes, "expected 0 bytes received") } -func (s *TracerSuite) TestTCPFailureConnectionReset() { +func (s *TracerSuite) TestTCPFailureConnectionResetWithData() { t := s.T() checkSkipFailureConnectionsTests(t) @@ -1304,27 +1311,84 @@ func (s *TracerSuite) TestTCPFailureConnectionReset() { require.Error(t, readErr, "expected connection reset error but got none") // Check if the connection was recorded as reset + var conn *network.ConnectionStats + require.Eventually(t, func() bool { + // 104 is the errno for ECONNRESET + conn = findFailedConnection(t, c.LocalAddr().String(), serverAddr, getConnections(t, tr), 104) + return conn != nil + }, 3*time.Second, 100*time.Millisecond, "Failed connection not recorded properly") + + require.NoError(t, c.Close(), "error closing client connection") + assert.Equal(t, uint32(1), conn.TCPFailures[104], "expected 1 connection reset") + assert.Equal(t, uint32(0), conn.TCPFailures[111], "expected 0 connection refused") + assert.Equal(t, uint32(0), conn.TCPFailures[110], "expected 0 connection timeout") + assert.Equal(t, uint64(4), conn.Monotonic.SentBytes, "expected 4 bytes sent") + assert.Equal(t, uint64(0), conn.Monotonic.RecvBytes, "expected 0 bytes received") +} + +func (s *TracerSuite) 
TestTCPFailureConnectionResetNoData() { + t := s.T() + + checkSkipFailureConnectionsTests(t) + + cfg := testConfig() + cfg.TCPFailedConnectionsEnabled = true + tr := setupTracer(t, cfg) + + // Server that immediately resets the connection without any data transfer + srv := testutil.NewTCPServer(func(c net.Conn) { + if tcpConn, ok := c.(*net.TCPConn); ok { + tcpConn.SetLinger(0) + } + time.Sleep(10 * time.Millisecond) + // Close the connection immediately to trigger a reset + c.Close() + }) + + require.NoError(t, srv.Run(), "error running server") + t.Cleanup(srv.Shutdown) + + serverAddr := srv.Address() + c, err := net.Dial("tcp", serverAddr) + require.NoError(t, err, "could not connect to server: ", err) + + // Wait briefly to give the server time to close the connection + time.Sleep(50 * time.Millisecond) + + // Attempt to write to the server, expecting a reset + _, writeErr := c.Write([]byte("ping")) + require.Error(t, writeErr, "expected connection reset error but got none") + + // Check if the connection was recorded as reset + var conn *network.ConnectionStats require.Eventually(t, func() bool { conns := getConnections(t, tr) // 104 is the errno for ECONNRESET - return findFailedConnection(t, c.LocalAddr().String(), serverAddr, conns, 104) + conn = findFailedConnection(t, c.LocalAddr().String(), serverAddr, conns, 104) + return conn != nil }, 3*time.Second, 100*time.Millisecond, "Failed connection not recorded properly") require.NoError(t, c.Close(), "error closing client connection") + + assert.Equal(t, uint32(1), conn.TCPFailures[104], "expected 1 connection reset") + assert.Equal(t, uint32(0), conn.TCPFailures[111], "expected 0 connection refused") + assert.Equal(t, uint32(0), conn.TCPFailures[110], "expected 0 connection timeout") + assert.Equal(t, uint64(0), conn.Monotonic.SentBytes, "expected 0 bytes sent") + assert.Equal(t, uint64(0), conn.Monotonic.RecvBytes, "expected 0 bytes received") } // findFailedConnection is a utility function to find a failed connection based on specific TCP error codes -func findFailedConnection(t *testing.T, local, remote string, conns *network.Connections, errorCode uint32) bool { // nolint:unused +func findFailedConnection(t *testing.T, local, remote string, conns *network.Connections, errorCode uint32) *network.ConnectionStats { // nolint:unused // Extract the address and port from the net.Addr types localAddrPort, err := netip.ParseAddrPort(local) if err != nil { t.Logf("Failed to parse local address: %v", err) - return false + return nil } remoteAddrPort, err := netip.ParseAddrPort(remote) if err != nil { t.Logf("Failed to parse remote address: %v", err) - return false + return nil } failureFilter := func(cs network.ConnectionStats) bool { @@ -1333,13 +1397,13 @@ func findFailedConnection(t *testing.T, local, remote string, conns *network.Con return localMatch && remoteMatch && cs.TCPFailures[errorCode] > 0 } - return network.FirstConnection(conns, failureFilter) != nil + return network.FirstConnection(conns, failureFilter) } // for some failed connections we don't know the local addr/port so we need to search by remote addr only -func findFailedConnectionByRemoteAddr(remoteAddr string, conns *network.Connections, errorCode uint32) bool { +func findFailedConnectionByRemoteAddr(remoteAddr string, conns *network.Connections, errorCode uint32) *network.ConnectionStats { failureFilter := func(cs network.ConnectionStats) bool { return netip.MustParseAddrPort(remoteAddr) == netip.AddrPortFrom(cs.Dest.Addr, cs.DPort) && cs.TCPFailures[errorCode] > 0 
} - return network.FirstConnection(conns, failureFilter) != nil + return network.FirstConnection(conns, failureFilter) } diff --git a/pkg/network/tracer/tracer_unsupported.go b/pkg/network/tracer/tracer_unsupported.go index bdb6abdf3dbf5..f3ef15179c0b7 100644 --- a/pkg/network/tracer/tracer_unsupported.go +++ b/pkg/network/tracer/tracer_unsupported.go @@ -34,6 +34,11 @@ func (t *Tracer) GetActiveConnections(_ string) (*network.Connections, error) { return nil, ebpf.ErrNotImplemented } +// GetNetworkID is not implemented on this OS for Tracer +func (t *Tracer) GetNetworkID(_ context.Context) (string, error) { + return "", ebpf.ErrNotImplemented +} + // RegisterClient registers the client func (t *Tracer) RegisterClient(_ string) error { return ebpf.ErrNotImplemented diff --git a/pkg/network/tracer/tracer_windows.go b/pkg/network/tracer/tracer_windows.go index a4677a19c501a..fba6ea78a95b0 100644 --- a/pkg/network/tracer/tracer_windows.go +++ b/pkg/network/tracer/tracer_windows.go @@ -309,6 +309,11 @@ func (t *Tracer) DebugDumpProcessCache(_ context.Context) (interface{}, error) { return nil, ebpf.ErrNotImplemented } +// GetNetworkID is not implemented on this OS for Tracer +func (t *Tracer) GetNetworkID(_ context.Context) (string, error) { + return "", ebpf.ErrNotImplemented +} + func newUSMMonitor(c *config.Config, dh driver.Handle) usm.Monitor { if !c.EnableHTTPMonitoring && !c.EnableNativeTLSMonitoring { return nil diff --git a/pkg/network/tracer/utils_linux.go b/pkg/network/tracer/utils_linux.go index d072906eec687..afb6ca29b6416 100644 --- a/pkg/network/tracer/utils_linux.go +++ b/pkg/network/tracer/utils_linux.go @@ -14,14 +14,14 @@ import ( "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/features" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" ) // NeedsEBPF returns `true` if the network-tracer requires eBPF func NeedsEBPF() bool { - return !coreconfig.SystemProbe().GetBool("network_config.enable_ebpfless") + return !pkgconfigsetup.SystemProbe().GetBool("network_config.enable_ebpfless") } // IsTracerSupportedByOS returns whether the current kernel version supports tracer functionality diff --git a/pkg/network/usm/ebpf_gotls.go b/pkg/network/usm/ebpf_gotls.go index b7e75f7c3c964..e0090e3ba889c 100644 --- a/pkg/network/usm/ebpf_gotls.go +++ b/pkg/network/usm/ebpf_gotls.go @@ -42,7 +42,6 @@ import ( const ( offsetsDataMap = "offsets_data" - pidToDeviceInodeMap = "pid_to_device_inode" goTLSReadArgsMap = "go_tls_read_args" goTLSWriteArgsMap = "go_tls_write_args" connectionTupleByGoTLSMap = "conn_tup_by_go_tls_conn" @@ -116,11 +115,6 @@ type goTLSProgram struct { // inodes. offsetsDataMap *ebpf.Map - // eBPF map holding the mapping of PIDs to device/inode numbers. - // On some filesystems (like btrfs), the device-id in the task-struct can be different from the device-id extracted - // in the user-mode. This map is used to ensure the eBPF probes are getting the correct device/inode numbers. 
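// --- illustrative sketch (not part of the patch) ---------------------------
// The hunk below drops the pid_to_device_inode map and keys offsets_data by
// the binary's device/inode identity alone. As a self-contained sketch of how
// such an identity can be derived in user space: the binaryID struct and
// identifyBinary helper are hypothetical names, while os.Stat, syscall.Stat_t
// and unix.Major/unix.Minor are real APIs.
package sketch

import (
	"fmt"
	"os"
	"syscall"

	"golang.org/x/sys/unix"
)

// binaryID mirrors the idea of a device-major/minor + inode key.
type binaryID struct {
	Major uint32
	Minor uint32
	Inode uint64
}

// identifyBinary stats the file and extracts the (device, inode) pair that
// identifies the underlying binary independently of any PID using it.
func identifyBinary(path string) (binaryID, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return binaryID{}, err
	}
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return binaryID{}, fmt.Errorf("unexpected stat type for %s", path)
	}
	return binaryID{
		Major: unix.Major(uint64(st.Dev)),
		Minor: unix.Minor(uint64(st.Dev)),
		Inode: st.Ino,
	}, nil
}
// ---------------------------------------------------------------------------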
- pidToDeviceInodeMap *ebpf.Map - // binAnalysisMetric handles telemetry on the time spent doing binary // analysis binAnalysisMetric *libtelemetry.Counter @@ -137,7 +131,6 @@ var _ utils.Attacher = &goTLSProgram{} var goTLSSpec = &protocols.ProtocolSpec{ Maps: []*manager.Map{ {Name: offsetsDataMap}, - {Name: pidToDeviceInodeMap}, {Name: goTLSReadArgsMap}, {Name: goTLSWriteArgsMap}, {Name: connectionTupleByGoTLSMap}, @@ -223,10 +216,6 @@ func (p *goTLSProgram) PreStart(m *manager.Manager) error { if err != nil { return fmt.Errorf("could not get offsets_data map: %s", err) } - p.pidToDeviceInodeMap, _, err = m.GetMap(pidToDeviceInodeMap) - if err != nil { - return fmt.Errorf("could not get %s map: %s", pidToDeviceInodeMap, err) - } procMonitor := monitor.GetProcessMonitor() cleanupExec := procMonitor.SubscribeExec(p.handleProcessStart) @@ -253,7 +242,7 @@ func (p *goTLSProgram) PreStart(m *manager.Manager) error { processSet := p.registry.GetRegisteredProcesses() deletedPids := monitor.FindDeletedProcesses(processSet) for deletedPid := range deletedPids { - _ = p.DetachPID(deletedPid) + _ = p.registry.Unregister(deletedPid) } } } @@ -289,7 +278,6 @@ var ( // DetachPID detaches the provided PID from the eBPF program. func (p *goTLSProgram) DetachPID(pid uint32) error { - _ = p.pidToDeviceInodeMap.Delete(unsafe.Pointer(&pid)) return p.registry.Unregister(pid) } @@ -350,13 +338,12 @@ func (p *goTLSProgram) AttachPID(pid uint32) error { // Check go process probeList := make([]manager.ProbeIdentificationPair, 0) - return p.registry.Register(binPath, pid, - registerCBCreator(p.manager, p.offsetsDataMap, p.pidToDeviceInodeMap, &probeList, p.binAnalysisMetric, p.binNoSymbolsMetric), - unregisterCBCreator(p.manager, &probeList, p.offsetsDataMap, p.pidToDeviceInodeMap), - alreadyCBCreator(p.pidToDeviceInodeMap)) + return p.registry.Register(binPath, pid, registerCBCreator(p.manager, p.offsetsDataMap, &probeList, p.binAnalysisMetric, p.binNoSymbolsMetric), + unregisterCBCreator(p.manager, &probeList, p.offsetsDataMap), + utils.IgnoreCB) } -func registerCBCreator(mgr *manager.Manager, offsetsDataMap, pidToDeviceInodeMap *ebpf.Map, probeIDs *[]manager.ProbeIdentificationPair, binAnalysisMetric, binNoSymbolsMetric *libtelemetry.Counter) func(path utils.FilePath) error { +func registerCBCreator(mgr *manager.Manager, offsetsDataMap *ebpf.Map, probeIDs *[]manager.ProbeIdentificationPair, binAnalysisMetric, binNoSymbolsMetric *libtelemetry.Counter) func(path utils.FilePath) error { return func(filePath utils.FilePath) error { start := time.Now() @@ -379,13 +366,13 @@ func registerCBCreator(mgr *manager.Manager, offsetsDataMap, pidToDeviceInodeMap return fmt.Errorf("error extracting inspectoin data from %s: %w", filePath.HostPath, err) } - if err := addInspectionResultToMap(offsetsDataMap, pidToDeviceInodeMap, filePath, inspectionResult); err != nil { + if err := addInspectionResultToMap(offsetsDataMap, filePath.ID, inspectionResult); err != nil { return fmt.Errorf("failed adding inspection rules: %w", err) } pIDs, err := attachHooks(mgr, inspectionResult, filePath.HostPath, filePath.ID) if err != nil { - removeInspectionResultFromMap(offsetsDataMap, pidToDeviceInodeMap, filePath) + removeInspectionResultFromMap(offsetsDataMap, filePath.ID) return fmt.Errorf("error while attaching hooks to %s: %w", filePath.HostPath, err) } *probeIDs = pIDs @@ -398,21 +385,6 @@ func registerCBCreator(mgr *manager.Manager, offsetsDataMap, pidToDeviceInodeMap } } -// alreadyCBCreator handles the case where a binary is already 
registered. In such a case the registry callback won't -// be called, so we need to add a mapping from the PID to the device/inode of the binary. -func alreadyCBCreator(pidToDeviceInodeMap *ebpf.Map) func(utils.FilePath) error { - return func(filePath utils.FilePath) error { - if filePath.PID == 0 { - return nil - } - return pidToDeviceInodeMap.Put(unsafe.Pointer(&filePath.PID), unsafe.Pointer(&gotls.TlsBinaryId{ - Id_major: unix.Major(filePath.ID.Dev), - Id_minor: unix.Minor(filePath.ID.Dev), - Ino: filePath.ID.Inode, - })) - } -} - func (p *goTLSProgram) handleProcessExit(pid pid) { _ = p.DetachPID(pid) } @@ -423,39 +395,32 @@ func (p *goTLSProgram) handleProcessStart(pid pid) { // addInspectionResultToMap runs a binary inspection and adds the result to the // map that's being read by the probes, indexed by the binary's inode number `ino`. -func addInspectionResultToMap(offsetsDataMap, pidToDeviceInodeMap *ebpf.Map, filePath utils.FilePath, result *bininspect.Result) error { +func addInspectionResultToMap(offsetsDataMap *ebpf.Map, binID utils.PathIdentifier, result *bininspect.Result) error { offsetsData, err := inspectionResultToProbeData(result) if err != nil { return fmt.Errorf("error while parsing inspection result: %w", err) } key := &gotls.TlsBinaryId{ - Id_major: unix.Major(filePath.ID.Dev), - Id_minor: unix.Minor(filePath.ID.Dev), - Ino: filePath.ID.Inode, + Id_major: unix.Major(binID.Dev), + Id_minor: unix.Minor(binID.Dev), + Ino: binID.Inode, } if err := offsetsDataMap.Put(unsafe.Pointer(key), unsafe.Pointer(&offsetsData)); err != nil { - return fmt.Errorf("could not write binary inspection result to map for binID %v (pid %v): %w", filePath.ID, filePath.PID, err) + return fmt.Errorf("could not write binary inspection result to map for binID %v: %w", binID, err) } - if err := pidToDeviceInodeMap.Put(unsafe.Pointer(&filePath.PID), unsafe.Pointer(key)); err != nil { - return fmt.Errorf("could not write pid to device/inode (%s) map for pid %v: %w", filePath.ID.String(), filePath.PID, err) - } return nil } -func removeInspectionResultFromMap(offsetsDataMap, pidToDeviceInodeMap *ebpf.Map, filePath utils.FilePath) { +func removeInspectionResultFromMap(offsetsDataMap *ebpf.Map, binID utils.PathIdentifier) { key := &gotls.TlsBinaryId{ - Id_major: unix.Major(filePath.ID.Dev), - Id_minor: unix.Minor(filePath.ID.Dev), - Ino: filePath.ID.Inode, - } - if filePath.PID != 0 { - _ = pidToDeviceInodeMap.Delete(unsafe.Pointer(&filePath.PID)) + Id_major: unix.Major(binID.Dev), + Id_minor: unix.Minor(binID.Dev), + Ino: binID.Inode, } if err := offsetsDataMap.Delete(unsafe.Pointer(key)); err != nil { - log.Errorf("could not remove inspection result from map for ino %v: %s", filePath.ID, err) - return + log.Errorf("could not remove inspection result from map for ino %v: %s", binID, err) } } @@ -510,12 +475,12 @@ func attachHooks(mgr *manager.Manager, result *bininspect.Result, binPath string return probeIDs, nil } -func unregisterCBCreator(mgr *manager.Manager, probeIDs *[]manager.ProbeIdentificationPair, offsetsDataMap, pidToDeviceInodeMap *ebpf.Map) func(path utils.FilePath) error { +func unregisterCBCreator(mgr *manager.Manager, probeIDs *[]manager.ProbeIdentificationPair, offsetsDataMap *ebpf.Map) func(path utils.FilePath) error { return func(path utils.FilePath) error { if len(*probeIDs) == 0 { return nil } - removeInspectionResultFromMap(offsetsDataMap, pidToDeviceInodeMap, path) + removeInspectionResultFromMap(offsetsDataMap, path.ID) for _, probeID := range *probeIDs { err := 
mgr.DetachHook(probeID) if err != nil { diff --git a/pkg/network/usm/kafka_monitor_test.go b/pkg/network/usm/kafka_monitor_test.go index c5ac048be3a80..1c3c5e12702ca 100644 --- a/pkg/network/usm/kafka_monitor_test.go +++ b/pkg/network/usm/kafka_monitor_test.go @@ -148,7 +148,7 @@ func (s *KafkaProtocolParsingSuite) TestKafkaProtocolParsing() { for mode, name := range map[bool]string{false: "without TLS", true: "with TLS"} { t.Run(name, func(t *testing.T) { - if mode && !gotlsutils.GoTLSSupported(config.New()) { + if mode && !gotlsutils.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } for _, version := range versions { @@ -529,19 +529,17 @@ func (s *KafkaProtocolParsingSuite) testKafkaProtocolParsing(t *testing.T, tls b require.NoError(t, proxy.WaitForConnectionReady(unixPath)) cfg := getDefaultTestConfiguration(tls) - monitor := newKafkaMonitor(t, cfg) - if tls && cfg.EnableGoTLSSupport { - utils.WaitForProgramsToBeTraced(t, "go-tls", proxyProcess.Process.Pid, utils.ManualTracingFallbackEnabled) - } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Cleanup(func() { for _, client := range tt.context.clients { client.Client.Close() } - cleanProtocolMaps(t, "kafka", monitor.ebpfProgram.Manager.Manager) }) - + monitor := newKafkaMonitor(t, cfg) + if tls && cfg.EnableGoTLSSupport { + utils.WaitForProgramsToBeTraced(t, "go-tls", proxyProcess.Process.Pid, utils.ManualTracingFallbackEnabled) + } tt.testBody(t, &tt.context, monitor) }) } @@ -1244,7 +1242,7 @@ func (s *KafkaProtocolParsingSuite) TestKafkaFetchRaw() { }) t.Run("with TLS", func(t *testing.T) { - if !gotlsutils.GoTLSSupported(config.New()) { + if !gotlsutils.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } @@ -1470,7 +1468,7 @@ func (s *KafkaProtocolParsingSuite) TestKafkaProduceRaw() { }) t.Run("with TLS", func(t *testing.T) { - if !gotlsutils.GoTLSSupported(config.New()) { + if !gotlsutils.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } diff --git a/pkg/network/usm/monitor_tls_test.go b/pkg/network/usm/monitor_tls_test.go index b9df53c92dd16..c706b5fe874d4 100644 --- a/pkg/network/usm/monitor_tls_test.go +++ b/pkg/network/usm/monitor_tls_test.go @@ -548,9 +548,11 @@ func (s *tlsSuite) TestJavaInjection() { } func TestHTTPGoTLSAttachProbes(t *testing.T) { + t.Skip("skipping GoTLS tests while we investigate their flakiness") + modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { - if !gotlstestutil.GoTLSSupported(config.New()) { + if !gotlstestutil.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } @@ -569,7 +571,7 @@ func testHTTP2GoTLSAttachProbes(t *testing.T, cfg *config.Config) { if !http2.Supported() { t.Skip("HTTP2 not supported for this setup") } - if !gotlstestutil.GoTLSSupported(cfg) { + if !gotlstestutil.GoTLSSupported(t, cfg) { t.Skip("GoTLS not supported for this setup") } @@ -601,7 +603,7 @@ func TestHTTPSGoTLSAttachProbesOnContainer(t *testing.T) { t.Skip("Skipping a flaky test") modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { - if !gotlstestutil.GoTLSSupported(config.New()) { + if !gotlstestutil.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } @@ -619,7 +621,7 @@ func TestOldConnectionRegression(t *testing.T) { modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} 
ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { - if !gotlstestutil.GoTLSSupported(config.New()) { + if !gotlstestutil.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } @@ -694,7 +696,7 @@ func TestOldConnectionRegression(t *testing.T) { func TestLimitListenerRegression(t *testing.T) { modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { - if !gotlstestutil.GoTLSSupported(config.New()) { + if !gotlstestutil.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } diff --git a/pkg/network/usm/postgres_monitor_test.go b/pkg/network/usm/postgres_monitor_test.go index 47a891cd4040b..94a188ab4d31b 100644 --- a/pkg/network/usm/postgres_monitor_test.go +++ b/pkg/network/usm/postgres_monitor_test.go @@ -137,7 +137,7 @@ func (s *postgresProtocolParsingSuite) TestDecoding() { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if tt.isTLS && !gotlstestutil.GoTLSSupported(config.New()) { + if tt.isTLS && !gotlstestutil.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } testDecoding(t, tt.isTLS) @@ -478,7 +478,6 @@ func testDecoding(t *testing.T, isTLS bool) { }, isTLS) }, }, - // This test validates that the SHOW command is currently not supported. { name: "show command", preMonitorSetup: func(t *testing.T, ctx pgTestContext) { @@ -497,8 +496,8 @@ func testDecoding(t *testing.T, isTLS bool) { }, validation: func(t *testing.T, _ pgTestContext, monitor *Monitor) { validatePostgres(t, monitor, map[string]map[postgres.Operation]int{ - "UNKNOWN": { - postgres.UnknownOP: adjustCount(1), + "search_path": { + postgres.ShowOP: adjustCount(1), }, }, isTLS) }, @@ -724,11 +723,106 @@ func validatePostgres(t *testing.T, monitor *Monitor, expectedStats map[string]m if hasTLSTag != tls { continue } - if _, ok := found[key.TableName]; !ok { - found[key.TableName] = make(map[postgres.Operation]int) + if _, ok := found[key.Parameters]; !ok { + found[key.Parameters] = make(map[postgres.Operation]int) } - found[key.TableName][key.Operation] += stats.Count + found[key.Parameters][key.Operation] += stats.Count } return reflect.DeepEqual(expectedStats, found) }, time.Second*5, time.Millisecond*100, "Expected to find a %v stats, instead captured %v", &expectedStats, &found) } + +func (s *postgresProtocolParsingSuite) TestExtractParameters() { + t := s.T() + + units := []struct { + name string + expected string + event ebpf.EbpfEvent + }{ + { + name: "query_size longer than the actual length of the content", + expected: "version and status", + event: ebpf.EbpfEvent{ + Tx: ebpf.EbpfTx{ + Request_fragment: createFragment([]byte("SHOW version and status")), + Original_query_size: 64, + }, + }, + }, + { + name: "query_size shorter than the actual length of the content", + expected: "param1 param2", + event: ebpf.EbpfEvent{ + Tx: ebpf.EbpfTx{ + Request_fragment: createFragment([]byte("SHOW param1 param2 param3")), + Original_query_size: 18, + }, + }, + }, + { + name: "the query has no parameters", + expected: postgres.EmptyParameters, + event: ebpf.EbpfEvent{ + Tx: ebpf.EbpfTx{ + Request_fragment: createFragment([]byte("SHOW ")), + Original_query_size: 10, + }, + }, + }, + { + name: "command has trailing zeros", + expected: "param", + event: ebpf.EbpfEvent{ + Tx: ebpf.EbpfTx{ + Request_fragment: [ebpf.BufferSize]byte{'S', 'H', 'O', 'W', ' ', 'p', 'a', 'r', 'a', 'm', 0, 0, 0}, + Original_query_size: 13, + }, + }, + }, + { + name: "malformed 
command with wrong query_size", + expected: postgres.EmptyParameters, + event: ebpf.EbpfEvent{ + Tx: ebpf.EbpfTx{ + Request_fragment: [ebpf.BufferSize]byte{'S', 'H', 'O', 'W', ' ', 0, 0, 'a', ' ', 'b', 'c', 0, 0, 0}, + Original_query_size: 14, + }, + }, + }, + { + name: "empty parameters with spaces and nils", + expected: postgres.EmptyParameters, + event: ebpf.EbpfEvent{ + Tx: ebpf.EbpfTx{ + Request_fragment: [ebpf.BufferSize]byte{'S', 'H', 'O', 'W', ' ', 0, ' ', 0, ' ', 0, 0, 0}, + Original_query_size: 12, + }, + }, + }, + { + name: "parameters with control codes only", + expected: "\x01\x02\x03\x04\x05", + event: ebpf.EbpfEvent{ + Tx: ebpf.EbpfTx{ + Request_fragment: [ebpf.BufferSize]byte{'S', 'H', 'O', 'W', ' ', 1, 2, 3, 4, 5}, + Original_query_size: 10, + }, + }, + }, + } + for _, unit := range units { + t.Run(unit.name, func(t *testing.T) { + e := postgres.NewEventWrapper(&unit.event) + require.NotNil(t, e) + e.Operation() + require.Equal(t, unit.expected, e.Parameters()) + }) + } +} + +func createFragment(fragment []byte) [ebpf.BufferSize]byte { + var b [ebpf.BufferSize]byte + copy(b[:], fragment) + return b +} diff --git a/pkg/network/usm/sharedlibraries/compile.go b/pkg/network/usm/sharedlibraries/compile.go index b33323d292444..be409112fc7d5 100644 --- a/pkg/network/usm/sharedlibraries/compile.go +++ b/pkg/network/usm/sharedlibraries/compile.go @@ -9,19 +9,19 @@ package sharedlibraries import ( + "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode/runtime" - "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/process/statsd" ) //go:generate $GOPATH/bin/include_headers pkg/network/ebpf/c/runtime/shared-libraries.c pkg/ebpf/bytecode/build/runtime/shared-libraries.c pkg/ebpf/c pkg/network/ebpf/c/runtime pkg/network/ebpf/c //go:generate $GOPATH/bin/integrity pkg/ebpf/bytecode/build/runtime/shared-libraries.c pkg/ebpf/bytecode/runtime/shared-libraries.go runtime -func getRuntimeCompiledSharedLibraries(config *config.Config) (runtime.CompiledOutput, error) { - return runtime.SharedLibraries.Compile(&config.Config, getCFlags(config), statsd.Client) +func getRuntimeCompiledSharedLibraries(config *ebpf.Config) (runtime.CompiledOutput, error) { + return runtime.SharedLibraries.Compile(config, getCFlags(config), statsd.Client) } -func getCFlags(config *config.Config) []string { +func getCFlags(config *ebpf.Config) []string { cflags := []string{"-g"} if config.BPFDebug { diff --git a/pkg/network/usm/sharedlibraries/ebpf.go b/pkg/network/usm/sharedlibraries/ebpf.go index 3b67a6b14d3a6..4b32417132add 100644 --- a/pkg/network/usm/sharedlibraries/ebpf.go +++ b/pkg/network/usm/sharedlibraries/ebpf.go @@ -12,6 +12,7 @@ import ( "math" "os" "runtime" + "strings" manager "github.com/DataDog/ebpf-manager" "golang.org/x/sys/unix" @@ -19,7 +20,6 @@ import ( ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" - "github.com/DataDog/datadog-agent/pkg/network/config" netebpf "github.com/DataDog/datadog-agent/pkg/network/ebpf" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -38,13 +38,31 @@ const ( var traceTypes = []string{"enter", "exit"} -type ebpfProgram struct { - cfg *config.Config +// EbpfProgram represents the shared libraries eBPF program. 
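// --- illustrative sketch (not part of the patch) ---------------------------
// The exported EbpfProgram introduced below also gains an IsSupported gate
// that compares the host kernel version against a minimum (4.14, or 5.5 on
// arm with runtime compilation/CO-RE). A minimal, self-contained sketch of
// the "encode and compare a kernel version" idea follows; versionCode and
// kernelAtLeast are hypothetical stand-ins for the agent's kernel helpers.
package sketch

import (
	"fmt"
	"strings"
)

// versionCode packs major/minor/patch the same way the KERNEL_VERSION()
// macro does, so versions can be compared as plain integers.
func versionCode(major, minor, patch uint32) uint32 {
	return major<<16 | minor<<8 | patch
}

// kernelAtLeast parses a release string such as "5.15.0-91-generic" and
// reports whether it is at or above the given threshold.
func kernelAtLeast(release string, threshold uint32) (bool, error) {
	var major, minor, patch uint32
	// Ignore everything after the first "-" (distro suffix).
	base := strings.SplitN(release, "-", 2)[0]
	if _, err := fmt.Sscanf(base, "%d.%d.%d", &major, &minor, &patch); err != nil {
		// Some kernels report only "major.minor"; retry without a patch level.
		if _, err2 := fmt.Sscanf(base, "%d.%d", &major, &minor); err2 != nil {
			return false, fmt.Errorf("unparseable kernel release %q: %w", release, err)
		}
	}
	return versionCode(major, minor, patch) >= threshold, nil
}
// ---------------------------------------------------------------------------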
+type EbpfProgram struct { + cfg *ddebpf.Config perfHandler *ddebpf.PerfHandler *ddebpf.Manager } -func newEBPFProgram(c *config.Config) *ebpfProgram { +// IsSupported returns true if the shared libraries monitoring is supported on the current system. +func IsSupported(cfg *ddebpf.Config) bool { + kversion, err := kernel.HostVersion() + if err != nil { + log.Warn("could not determine the current kernel version. shared libraries monitoring disabled.") + return false + } + + if strings.HasPrefix(runtime.GOARCH, "arm") { + return kversion >= kernel.VersionCode(5, 5, 0) && (cfg.EnableRuntimeCompiler || cfg.EnableCORE) + } + + // Minimum version for shared libraries monitoring is 4.14 + return kversion >= kernel.VersionCode(4, 14, 0) +} + +// NewEBPFProgram creates a new EBPFProgram to monitor shared libraries +func NewEBPFProgram(c *ddebpf.Config) *EbpfProgram { perfHandler := ddebpf.NewPerfHandler(100) pm := &manager.PerfMap{ Map: manager.Map{ @@ -74,14 +92,15 @@ func newEBPFProgram(c *config.Config) *ebpfProgram { ) } - return &ebpfProgram{ + return &EbpfProgram{ cfg: c, Manager: ddebpf.NewManager(mgr, &ebpftelemetry.ErrorsTelemetryModifier{}), perfHandler: perfHandler, } } -func (e *ebpfProgram) Init() error { +// Init initializes the eBPF program. +func (e *EbpfProgram) Init() error { var err error if e.cfg.EnableCORE { err = e.initCORE() @@ -110,17 +129,19 @@ func (e *ebpfProgram) Init() error { return e.initPrebuilt() } -func (e *ebpfProgram) GetPerfHandler() *ddebpf.PerfHandler { +// GetPerfHandler returns the perf handler +func (e *EbpfProgram) GetPerfHandler() *ddebpf.PerfHandler { return e.perfHandler } -func (e *ebpfProgram) Stop() { +// Stop stops the eBPF program +func (e *EbpfProgram) Stop() { ebpftelemetry.UnregisterTelemetry(e.Manager.Manager) e.Manager.Stop(manager.CleanAll) //nolint:errcheck e.perfHandler.Stop() } -func (e *ebpfProgram) init(buf bytecode.AssetReader, options manager.Options) error { +func (e *EbpfProgram) init(buf bytecode.AssetReader, options manager.Options) error { options.RLimit = &unix.Rlimit{ Cur: math.MaxUint64, Max: math.MaxUint64, @@ -138,12 +159,12 @@ func (e *ebpfProgram) init(buf bytecode.AssetReader, options manager.Options) er return e.InitWithOptions(buf, &options) } -func (e *ebpfProgram) initCORE() error { +func (e *EbpfProgram) initCORE() error { assetName := getAssetName("shared-libraries", e.cfg.BPFDebug) return ddebpf.LoadCOREAsset(assetName, e.init) } -func (e *ebpfProgram) initRuntimeCompiler() error { +func (e *EbpfProgram) initRuntimeCompiler() error { bc, err := getRuntimeCompiledSharedLibraries(e.cfg) if err != nil { return err @@ -152,7 +173,7 @@ func (e *ebpfProgram) initRuntimeCompiler() error { return e.init(bc, manager.Options{}) } -func (e *ebpfProgram) initPrebuilt() error { +func (e *EbpfProgram) initPrebuilt() error { bc, err := netebpf.ReadSharedLibrariesModule(e.cfg.BPFDir, e.cfg.BPFDebug) if err != nil { return err diff --git a/pkg/network/usm/sharedlibraries/testutil/testutil.go b/pkg/network/usm/sharedlibraries/testutil/testutil.go index 1cf1bcee23e3a..80f6832cd2f63 100644 --- a/pkg/network/usm/sharedlibraries/testutil/testutil.go +++ b/pkg/network/usm/sharedlibraries/testutil/testutil.go @@ -21,6 +21,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" protocolstestutil "github.com/DataDog/datadog-agent/pkg/network/protocols/testutil" usmtestutil "github.com/DataDog/datadog-agent/pkg/network/usm/testutil" + "github.com/DataDog/datadog-agent/pkg/util/log" ) // mutex protecting build 
process @@ -35,6 +36,7 @@ func OpenFromProcess(t *testing.T, programExecutable string, paths ...string) (* cmd.Stderr = patternScanner require.NoError(t, cmd.Start()) + log.Infof("exec prog=%s, paths=%v | PID = %d", programExecutable, paths, cmd.Process.Pid) t.Cleanup(func() { if cmd.Process == nil { @@ -50,7 +52,7 @@ func OpenFromProcess(t *testing.T, programExecutable string, paths ...string) (* case <-time.After(time.Second * 5): patternScanner.PrintLogs(t) // please don't use t.Fatalf() here as we could test if it failed later - return nil, fmt.Errorf("couldn't luanch process in time") + return nil, fmt.Errorf("couldn't launch process in time") } } } diff --git a/pkg/network/usm/sharedlibraries/types.go b/pkg/network/usm/sharedlibraries/types.go index 906bcb7188967..066b69fbc2b0a 100644 --- a/pkg/network/usm/sharedlibraries/types.go +++ b/pkg/network/usm/sharedlibraries/types.go @@ -12,8 +12,8 @@ package sharedlibraries */ import "C" -type libPath C.lib_path_t +type LibPath C.lib_path_t const ( - libPathMaxSize = C.LIB_PATH_MAX_SIZE + LibPathMaxSize = C.LIB_PATH_MAX_SIZE ) diff --git a/pkg/network/usm/sharedlibraries/types_linux.go b/pkg/network/usm/sharedlibraries/types_linux.go index c857c249155e7..3240185a07632 100644 --- a/pkg/network/usm/sharedlibraries/types_linux.go +++ b/pkg/network/usm/sharedlibraries/types_linux.go @@ -3,12 +3,12 @@ package sharedlibraries -type libPath struct { +type LibPath struct { Pid uint32 Len uint32 Buf [120]byte } const ( - libPathMaxSize = 0x78 + LibPathMaxSize = 0x78 ) diff --git a/pkg/network/usm/sharedlibraries/watcher.go b/pkg/network/usm/sharedlibraries/watcher.go index 1342e2c8fd50b..ab0a9a4bfff81 100644 --- a/pkg/network/usm/sharedlibraries/watcher.go +++ b/pkg/network/usm/sharedlibraries/watcher.go @@ -32,11 +32,13 @@ const ( scanTerminatedProcessesInterval = 30 * time.Second ) -func toLibPath(data []byte) libPath { - return *(*libPath)(unsafe.Pointer(&data[0])) +// ToLibPath casts the perf event data to the LibPath structure +func ToLibPath(data []byte) LibPath { + return *(*LibPath)(unsafe.Pointer(&data[0])) } -func toBytes(l *libPath) []byte { +// ToBytes converts the libpath to a byte array containing the path +func ToBytes(l *LibPath) []byte { return l.Buf[:l.Len] } @@ -56,7 +58,7 @@ type Watcher struct { loadEvents *ddebpf.PerfHandler processMonitor *monitor.ProcessMonitor registry *utils.FileRegistry - ebpfProgram *ebpfProgram + ebpfProgram *EbpfProgram // telemetry libHits *telemetry.Counter @@ -68,7 +70,7 @@ var _ utils.Attacher = &Watcher{} // NewWatcher creates a new Watcher instance func NewWatcher(cfg *config.Config, rules ...Rule) (*Watcher, error) { - ebpfProgram := newEBPFProgram(cfg) + ebpfProgram := NewEBPFProgram(&cfg.Config) err := ebpfProgram.Init() if err != nil { return nil, fmt.Errorf("error initializing shared library program: %w", err) @@ -255,7 +257,7 @@ func (w *Watcher) Start() { return } - lib := toLibPath(event.Data) + lib := ToLibPath(event.Data) if int(lib.Pid) == thisPID { // don't scan ourself event.Done() @@ -263,7 +265,7 @@ func (w *Watcher) Start() { } w.libHits.Add(1) - path := toBytes(&lib) + path := ToBytes(&lib) for _, r := range w.rules { if r.Re.Match(path) { w.libMatches.Add(1) diff --git a/pkg/network/usm/tests/tracer_classification_test.go b/pkg/network/usm/tests/tracer_classification_test.go index 582425dc6c360..fb167ab7e093c 100644 --- a/pkg/network/usm/tests/tracer_classification_test.go +++ b/pkg/network/usm/tests/tracer_classification_test.go @@ -24,7 +24,6 @@ import ( 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/network" @@ -52,7 +51,7 @@ func TestMain(m *testing.M) { func setupTracer(t testing.TB, cfg *config.Config) *tracer.Tracer { if ebpftest.GetBuildMode() == ebpftest.Fentry { - ddconfig.SetFeatures(t, env.ECSFargate) + env.SetFeatures(t, env.ECSFargate) // protocol classification not yet supported on fargate cfg.ProtocolClassificationEnabled = false } diff --git a/pkg/network/usm/tests/tracer_usm_linux_test.go b/pkg/network/usm/tests/tracer_usm_linux_test.go index 9399e6b1cb210..2da79eadbf80a 100644 --- a/pkg/network/usm/tests/tracer_usm_linux_test.go +++ b/pkg/network/usm/tests/tracer_usm_linux_test.go @@ -118,7 +118,7 @@ func skipIfUsingNAT(t *testing.T, ctx testContext) { // skipIfGoTLSNotSupported skips the test if GoTLS is not supported. func skipIfGoTLSNotSupported(t *testing.T, _ testContext) { - if !gotlstestutil.GoTLSSupported(config.New()) { + if !gotlstestutil.GoTLSSupported(t, config.New()) { t.Skip("GoTLS is not supported") } } @@ -183,7 +183,7 @@ func (s *USMSuite) TestProtocolClassification() { cfg.EnableNativeTLSMonitoring = true cfg.EnableHTTPMonitoring = true cfg.EnablePostgresMonitoring = true - cfg.EnableGoTLSSupport = gotlstestutil.GoTLSSupported(cfg) + cfg.EnableGoTLSSupport = gotlstestutil.GoTLSSupported(t, cfg) cfg.BypassEnabled = true tr, err := tracer.NewTracer(cfg, nil) require.NoError(t, err) diff --git a/pkg/network/usm/testutil/generic_testutil_builder.go b/pkg/network/usm/testutil/generic_testutil_builder.go index af5cacfcfcdb8..899cce0aa70ca 100644 --- a/pkg/network/usm/testutil/generic_testutil_builder.go +++ b/pkg/network/usm/testutil/generic_testutil_builder.go @@ -13,12 +13,16 @@ import ( "path" ) -// BuildGoBinaryWrapper builds a Go binary and returns the path to it. +const ( + baseLDFlags = "-ldflags=-extldflags '-static'" +) + +// buildGoBinary builds a Go binary and returns the path to it. // If the binary is already built (meanly in the CI), it returns the // path to the binary. -func BuildGoBinaryWrapper(curDir, binaryDir string) (string, error) { - serverSrcDir := path.Join(curDir, binaryDir) - cachedServerBinaryPath := path.Join(serverSrcDir, binaryDir) +func buildGoBinary(srcDir, outPath, buildFlags string) (string, error) { + serverSrcDir := srcDir + cachedServerBinaryPath := outPath // If there is a compiled binary already, skip the compilation. // Meant for the CI. @@ -26,7 +30,7 @@ func BuildGoBinaryWrapper(curDir, binaryDir string) (string, error) { return cachedServerBinaryPath, nil } - c := exec.Command("go", "build", "-buildvcs=false", "-a", "-tags=test", "-ldflags=-extldflags '-static'", "-o", cachedServerBinaryPath, serverSrcDir) + c := exec.Command("go", "build", "-buildvcs=false", "-a", "-tags=test,netgo", buildFlags, "-o", cachedServerBinaryPath, serverSrcDir) out, err := c.CombinedOutput() if err != nil { return "", fmt.Errorf("could not build unix transparent proxy server test binary: %s\noutput: %s", err, string(out)) @@ -34,3 +38,21 @@ func BuildGoBinaryWrapper(curDir, binaryDir string) (string, error) { return cachedServerBinaryPath, nil } + +// BuildGoBinaryWrapper builds a Go binary and returns the path to it. +// If the binary is already built (meanly in the CI), it returns the +// path to the binary. 
+func BuildGoBinaryWrapper(curDir, binaryDir string) (string, error) { + srcDir := path.Join(curDir, binaryDir) + outPath := path.Join(srcDir, binaryDir) + return buildGoBinary(srcDir, outPath, baseLDFlags) +} + +// BuildGoBinaryWrapperWithoutSymbols builds a Go binary without symbols and returns the path to it. +// If the binary is already built (mainly in the CI), it returns the +// path to the binary. +func BuildGoBinaryWrapperWithoutSymbols(curDir, binaryDir string) (string, error) { + srcDir := path.Join(curDir, binaryDir) + outPath := path.Join(srcDir, binaryDir+"-nosymbols") + return buildGoBinary(srcDir, outPath, baseLDFlags+" -s -w") +} diff --git a/pkg/network/usm/usm_grpc_monitor_test.go b/pkg/network/usm/usm_grpc_monitor_test.go index 73e3a5de28f2e..35ae34a9d46d4 100644 --- a/pkg/network/usm/usm_grpc_monitor_test.go +++ b/pkg/network/usm/usm_grpc_monitor_test.go @@ -71,7 +71,7 @@ func TestGRPCScenarios(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - if tc.isTLS && !gotlsutils.GoTLSSupported(config.New()) { + if tc.isTLS && !gotlsutils.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } suite.Run(t, &usmGRPCSuite{isTLS: tc.isTLS}) diff --git a/pkg/network/usm/usm_http2_monitor_test.go b/pkg/network/usm/usm_http2_monitor_test.go index 825c842fe68cd..46c3188b09833 100644 --- a/pkg/network/usm/usm_http2_monitor_test.go +++ b/pkg/network/usm/usm_http2_monitor_test.go @@ -108,7 +108,7 @@ func TestHTTP2Scenarios(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - if tc.isTLS && !gotlsutils.GoTLSSupported(config.New()) { + if tc.isTLS && !gotlsutils.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } suite.Run(t, &usmHTTP2Suite{isTLS: tc.isTLS}) diff --git a/pkg/network/usm/utils/file_registry.go b/pkg/network/usm/utils/file_registry.go index 5ea71e3838603..2b212074a7413 100644 --- a/pkg/network/usm/utils/file_registry.go +++ b/pkg/network/usm/utils/file_registry.go @@ -78,7 +78,8 @@ func NewFilePath(procRoot, namespacedPath string, pid uint32) (FilePath, error) return FilePath{HostPath: path, ID: pathID, PID: pid}, nil } -type callback func(FilePath) error +// Callback is a function that is executed when a file becomes active or inactive +type Callback func(FilePath) error // IgnoreCB is just a dummy callback that doesn't do anything // Meant for testing purposes @@ -122,7 +123,7 @@ var ( // If no current registration exists for the given `PathIdentifier`, we execute // its *activation* callback.
Otherwise, we increment the reference counter for // the existing registration if and only if `pid` is new; -func (r *FileRegistry) Register(namespacedPath string, pid uint32, activationCB, deactivationCB, alreadyRegistered callback) error { +func (r *FileRegistry) Register(namespacedPath string, pid uint32, activationCB, deactivationCB, alreadyRegistered Callback) error { if activationCB == nil || deactivationCB == nil { return errCallbackIsMissing } @@ -281,7 +282,7 @@ func (r *FileRegistry) Clear() { r.stopped = true } -func (r *FileRegistry) newRegistration(sampleFilePath string, deactivationCB callback) *registration { +func (r *FileRegistry) newRegistration(sampleFilePath string, deactivationCB Callback) *registration { return &registration{ deactivationCB: deactivationCB, uniqueProcessesCount: atomic.NewInt32(1), @@ -292,7 +293,7 @@ func (r *FileRegistry) newRegistration(sampleFilePath string, deactivationCB cal type registration struct { uniqueProcessesCount *atomic.Int32 - deactivationCB callback + deactivationCB Callback // we are sharing the telemetry from FileRegistry telemetry *registryTelemetry diff --git a/pkg/networkdevice/pinger/pinger_linux.go b/pkg/networkdevice/pinger/pinger_linux.go index 059de377911ef..89476acbc920b 100644 --- a/pkg/networkdevice/pinger/pinger_linux.go +++ b/pkg/networkdevice/pinger/pinger_linux.go @@ -10,7 +10,7 @@ package pinger import ( "encoding/json" - dd_config "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -42,7 +42,7 @@ func (p *LinuxPinger) Ping(host string) (*Result, error) { } tu, err := net.GetRemoteSystemProbeUtil( - dd_config.SystemProbe().GetString("system_probe_config.sysprobe_socket")) + pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")) if err != nil { log.Warnf("could not initialize system-probe connection: %s", err.Error()) return nil, err } diff --git a/pkg/networkdevice/utils/utils_test.go b/pkg/networkdevice/utils/utils_test.go index 3afbace214c6d..b1d20706b9a8f 100644 --- a/pkg/networkdevice/utils/utils_test.go +++ b/pkg/networkdevice/utils/utils_test.go @@ -9,9 +9,10 @@ import ( "fmt" "testing" - "github.com/DataDog/datadog-agent/pkg/config" - "github.com/DataDog/datadog-agent/pkg/version" "github.com/stretchr/testify/assert" + + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/version" ) func Test_CopyStrings(t *testing.T) { @@ -28,8 +29,8 @@ func Test_BoolToFloat64(t *testing.T) { } func Test_getAgentTags(t *testing.T) { - config.Datadog().SetWithoutSource("hostname", "my-host") - defer config.Datadog().SetWithoutSource("hostname", "") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "my-host") + defer pkgconfigsetup.Datadog().SetWithoutSource("hostname", "") assert.Equal(t, []string{ "agent_host:my-host", diff --git a/pkg/networkpath/payload/pathevent.go b/pkg/networkpath/payload/pathevent.go index 25fa9f748cffc..121461c10a739 100644 --- a/pkg/networkpath/payload/pathevent.go +++ b/pkg/networkpath/payload/pathevent.go @@ -33,9 +33,13 @@ const ( // NetworkPathHop encapsulates the data for a single // hop within a path type NetworkPathHop struct { - TTL int `json:"ttl"` - IPAddress string `json:"ip_address"` - Hostname string `json:"hostname,omitempty"` + TTL int `json:"ttl"` + IPAddress string `json:"ip_address"` + + // hostname is the reverse DNS of the ip_address + //
TODO (separate PR): we might want to rename it to reverse_dns_hostname for consistency with destination.reverse_dns_hostname + Hostname string `json:"hostname,omitempty"` + RTT float64 `json:"rtt,omitempty"` Reachable bool `json:"reachable"` } @@ -53,22 +57,24 @@ type NetworkPathSource struct { // NetworkPathDestination encapsulates information // about the destination of a path type NetworkPathDestination struct { - Hostname string `json:"hostname"` - IPAddress string `json:"ip_address"` - Port uint16 `json:"port"` - Service string `json:"service,omitempty"` + Hostname string `json:"hostname"` + IPAddress string `json:"ip_address"` + Port uint16 `json:"port"` + Service string `json:"service,omitempty"` + ReverseDNSHostname string `json:"reverse_dns_hostname,omitempty"` } // NetworkPath encapsulates data that defines a // path between two hosts as mapped by the agent type NetworkPath struct { - Timestamp int64 `json:"timestamp"` - Namespace string `json:"namespace"` // namespace used to resolve NDM resources - PathtraceID string `json:"pathtrace_id"` - Origin PathOrigin `json:"origin"` - Protocol Protocol `json:"protocol"` - Source NetworkPathSource `json:"source"` - Destination NetworkPathDestination `json:"destination"` - Hops []NetworkPathHop `json:"hops"` - Tags []string `json:"tags,omitempty"` + Timestamp int64 `json:"timestamp"` + AgentVersion string `json:"agent_version"` + Namespace string `json:"namespace"` // namespace used to resolve NDM resources + PathtraceID string `json:"pathtrace_id"` + Origin PathOrigin `json:"origin"` + Protocol Protocol `json:"protocol"` + Source NetworkPathSource `json:"source"` + Destination NetworkPathDestination `json:"destination"` + Hops []NetworkPathHop `json:"hops"` + Tags []string `json:"tags,omitempty"` } diff --git a/pkg/networkpath/traceroute/runner.go b/pkg/networkpath/traceroute/runner.go index cf57bb40f73d0..d75738e6c5260 100644 --- a/pkg/networkpath/traceroute/runner.go +++ b/pkg/networkpath/traceroute/runner.go @@ -15,6 +15,7 @@ import ( "sort" "time" + "github.com/DataDog/datadog-agent/pkg/version" "github.com/Datadog/dublin-traceroute/go/dublintraceroute/probes/probev4" "github.com/Datadog/dublin-traceroute/go/dublintraceroute/results" "github.com/vishvananda/netns" @@ -42,8 +43,6 @@ const ( DefaultNumPaths = 1 // DefaultMinTTL defines the default minimum TTL DefaultMinTTL = 1 - // DefaultMaxTTL defines the default maximum TTL - DefaultMaxTTL = 30 // DefaultDelay defines the default delay DefaultDelay = 50 //msec // DefaultOutputFormat defines the default output format @@ -117,12 +116,12 @@ func (r *Runner) RunTraceroute(ctx context.Context, cfg Config) (payload.Network maxTTL := cfg.MaxTTL if maxTTL == 0 { - maxTTL = DefaultMaxTTL + maxTTL = setup.DefaultNetworkPathMaxTTL } var timeout time.Duration if cfg.Timeout == 0 { - timeout = setup.DefaultNetworkPathTimeout * time.Millisecond + timeout = setup.DefaultNetworkPathTimeout * time.Duration(maxTTL) * time.Millisecond } else { timeout = cfg.Timeout } @@ -227,17 +226,19 @@ func (r *Runner) runTCP(cfg Config, hname string, target net.IP, maxTTL uint8, t func (r *Runner) processTCPResults(res *tcp.Results, hname string, destinationHost string, destinationPort uint16, destinationIP net.IP) (payload.NetworkPath, error) { traceroutePath := payload.NetworkPath{ - PathtraceID: payload.NewPathtraceID(), - Protocol: payload.ProtocolTCP, - Timestamp: time.Now().UnixMilli(), + AgentVersion: version.AgentVersion, + PathtraceID: payload.NewPathtraceID(), + Protocol: payload.ProtocolTCP, + 
Timestamp: time.Now().UnixMilli(), Source: payload.NetworkPathSource{ Hostname: hname, NetworkID: r.networkID, }, Destination: payload.NetworkPathDestination{ - Hostname: getDestinationHostname(destinationHost), - Port: destinationPort, - IPAddress: destinationIP.String(), + Hostname: destinationHost, + Port: destinationPort, + IPAddress: destinationIP.String(), + ReverseDNSHostname: getReverseDNSForIP(destinationIP), }, } @@ -289,17 +290,19 @@ func (r *Runner) processUDPResults(res *results.Results, hname string, destinati } traceroutePath := payload.NetworkPath{ - PathtraceID: payload.NewPathtraceID(), - Protocol: payload.ProtocolUDP, - Timestamp: time.Now().UnixMilli(), + AgentVersion: version.AgentVersion, + PathtraceID: payload.NewPathtraceID(), + Protocol: payload.ProtocolUDP, + Timestamp: time.Now().UnixMilli(), Source: payload.NetworkPathSource{ Hostname: hname, NetworkID: r.networkID, }, Destination: payload.NetworkPathDestination{ - Hostname: getDestinationHostname(destinationHost), - Port: destinationPort, - IPAddress: destinationIP.String(), + Hostname: destinationHost, + Port: destinationPort, + IPAddress: destinationIP.String(), + ReverseDNSHostname: getReverseDNSForIP(destinationIP), }, } diff --git a/pkg/networkpath/traceroute/tcp/tcpv4.go b/pkg/networkpath/traceroute/tcp/tcpv4.go index 6b5e9b8db94a0..23f3c45950689 100644 --- a/pkg/networkpath/traceroute/tcp/tcpv4.go +++ b/pkg/networkpath/traceroute/tcp/tcpv4.go @@ -103,15 +103,9 @@ func (t *TCPv4) TracerouteSequential() (*Results, error) { // hops should be of length # of hops hops := make([]*Hop, 0, t.MaxTTL-t.MinTTL) - // TODO: better logic around timeout for sequential is needed - // right now we're just hacking around the existing - // need to convert uint8 to int for proper conversion to - // time.Duration - timeout := t.Timeout / time.Duration(int(t.MaxTTL-t.MinTTL)) - for i := int(t.MinTTL); i <= int(t.MaxTTL); i++ { seqNumber := rand.Uint32() - hop, err := t.sendAndReceive(rawIcmpConn, rawTCPConn, i, seqNumber, timeout) + hop, err := t.sendAndReceive(rawIcmpConn, rawTCPConn, i, seqNumber, t.Timeout) if err != nil { return nil, fmt.Errorf("failed to run traceroute: %w", err) } diff --git a/pkg/networkpath/traceroute/tcp/utils.go b/pkg/networkpath/traceroute/tcp/utils.go index eba21aa3631c9..7eb8c5cf45222 100644 --- a/pkg/networkpath/traceroute/tcp/utils.go +++ b/pkg/networkpath/traceroute/tcp/utils.go @@ -151,6 +151,8 @@ func listenPackets(icmpConn rawConnWrapper, tcpConn rawConnWrapper, timeout time var icmpIP net.IP var tcpIP net.IP var icmpCode layers.ICMPv4TypeCode + var tcpFinished time.Time + var icmpFinished time.Time var port uint16 wg.Add(2) ctx, cancel := context.WithTimeout(context.Background(), timeout) @@ -158,24 +160,21 @@ func listenPackets(icmpConn rawConnWrapper, tcpConn rawConnWrapper, timeout time go func() { defer wg.Done() defer cancel() - tcpIP, port, _, tcpErr = handlePackets(ctx, tcpConn, "tcp", localIP, localPort, remoteIP, remotePort, seqNum) + tcpIP, port, _, tcpFinished, tcpErr = handlePackets(ctx, tcpConn, "tcp", localIP, localPort, remoteIP, remotePort, seqNum) }() go func() { defer wg.Done() defer cancel() - icmpIP, _, icmpCode, icmpErr = handlePackets(ctx, icmpConn, "icmp", localIP, localPort, remoteIP, remotePort, seqNum) + icmpIP, _, icmpCode, icmpFinished, icmpErr = handlePackets(ctx, icmpConn, "icmp", localIP, localPort, remoteIP, remotePort, seqNum) }() wg.Wait() - // TODO: while this is okay, we - // should do this more cleanly - finished := time.Now() if tcpErr != nil && 
icmpErr != nil { _, tcpCanceled := tcpErr.(canceledError) _, icmpCanceled := icmpErr.(canceledError) if icmpCanceled && tcpCanceled { log.Trace("timed out waiting for responses") - return net.IP{}, 0, 0, finished, nil + return net.IP{}, 0, 0, time.Time{}, nil } if tcpErr != nil { log.Errorf("TCP listener error: %s", tcpErr.Error()) @@ -184,34 +183,34 @@ func listenPackets(icmpConn rawConnWrapper, tcpConn rawConnWrapper, timeout time log.Errorf("ICMP listener error: %s", icmpErr.Error()) } - return net.IP{}, 0, 0, finished, multierr.Append(fmt.Errorf("tcp error: %w", tcpErr), fmt.Errorf("icmp error: %w", icmpErr)) + return net.IP{}, 0, 0, time.Time{}, multierr.Append(fmt.Errorf("tcp error: %w", tcpErr), fmt.Errorf("icmp error: %w", icmpErr)) } // if there was an error for TCP, but not // ICMP, return the ICMP response if tcpErr != nil { - return icmpIP, port, icmpCode, finished, nil + return icmpIP, port, icmpCode, icmpFinished, nil } // return the TCP response - return tcpIP, port, 0, finished, nil + return tcpIP, port, 0, tcpFinished, nil } // handlePackets in its current implementation should listen for the first matching // packet on the connection and then return. If no packet is received within the // timeout or if the listener is canceled, it should return a canceledError -func handlePackets(ctx context.Context, conn rawConnWrapper, listener string, localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, seqNum uint32) (net.IP, uint16, layers.ICMPv4TypeCode, error) { +func handlePackets(ctx context.Context, conn rawConnWrapper, listener string, localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, seqNum uint32) (net.IP, uint16, layers.ICMPv4TypeCode, time.Time, error) { buf := make([]byte, 1024) for { select { case <-ctx.Done(): - return net.IP{}, 0, 0, canceledError("listener canceled") + return net.IP{}, 0, 0, time.Time{}, canceledError("listener canceled") default: } now := time.Now() err := conn.SetReadDeadline(now.Add(time.Millisecond * 100)) if err != nil { - return net.IP{}, 0, 0, fmt.Errorf("failed to read: %w", err) + return net.IP{}, 0, 0, time.Time{}, fmt.Errorf("failed to read: %w", err) } header, packet, _, err := conn.ReadFrom(buf) if err != nil { @@ -220,8 +219,12 @@ func handlePackets(ctx context.Context, conn rawConnWrapper, listener string, lo continue } } - return net.IP{}, 0, 0, err + return net.IP{}, 0, 0, time.Time{}, err } + // once we have a packet, take a timestamp to know when + // the response was received, if it matches, we will + // return this timestamp + received := time.Now() // TODO: remove listener constraint and parse all packets // in the same function return a succinct struct here if listener == "icmp" { @@ -231,7 +234,7 @@ func handlePackets(ctx context.Context, conn rawConnWrapper, listener string, lo continue } if icmpMatch(localIP, localPort, remoteIP, remotePort, seqNum, icmpResponse) { - return icmpResponse.SrcIP, 0, icmpResponse.TypeCode, nil + return icmpResponse.SrcIP, 0, icmpResponse.TypeCode, received, nil } } else if listener == "tcp" { tcpResp, err := parseTCP(header, packet) @@ -240,10 +243,10 @@ func handlePackets(ctx context.Context, conn rawConnWrapper, listener string, lo continue } if tcpMatch(localIP, localPort, remoteIP, remotePort, seqNum, tcpResp) { - return tcpResp.SrcIP, uint16(tcpResp.TCPResponse.SrcPort), 0, nil + return tcpResp.SrcIP, uint16(tcpResp.TCPResponse.SrcPort), 0, received, nil } } else { - return net.IP{}, 0, 0, fmt.Errorf("unsupported listener type") + return net.IP{}, 0, 0, 
received, fmt.Errorf("unsupported listener type") } } } @@ -258,7 +261,6 @@ func parseICMP(header *ipv4.Header, payload []byte) (*icmpResponse, error) { if header.Protocol != IPProtoICMP || header.Version != 4 || header.Src == nil || header.Dst == nil { - log.Errorf("invalid IP header for ICMP packet") return nil, fmt.Errorf("invalid IP header for ICMP packet: %+v", header) } icmpResponse.SrcIP = header.Src @@ -312,7 +314,6 @@ func parseTCP(header *ipv4.Header, payload []byte) (*tcpResponse, error) { if header.Protocol != IPProtoTCP || header.Version != 4 || header.Src == nil || header.Dst == nil { - log.Errorf("invalid IP header for TCP packet") return nil, fmt.Errorf("invalid IP header for TCP packet: %+v", header) } tcpResponse.SrcIP = header.Src diff --git a/pkg/networkpath/traceroute/tcp/utils_test.go b/pkg/networkpath/traceroute/tcp/utils_test.go index 4bd4b996b0e3f..a7ac53e57cc68 100644 --- a/pkg/networkpath/traceroute/tcp/utils_test.go +++ b/pkg/networkpath/traceroute/tcp/utils_test.go @@ -26,6 +26,9 @@ import ( var ( srcIP = net.ParseIP("1.2.3.4") dstIP = net.ParseIP("5.6.7.8") + + innerSrcIP = net.ParseIP("10.0.0.1") + innerDstIP = net.ParseIP("192.168.1.1") ) type ( @@ -47,6 +50,8 @@ type ( ) func Test_handlePackets(t *testing.T) { + _, tcpBytes := createMockTCPPacket(createMockIPv4Header(dstIP, srcIP, 6), createMockTCPLayer(443, 12345, 28394, 28395, true, true, true)) + tt := []struct { description string // input @@ -121,13 +126,47 @@ func Test_handlePackets(t *testing.T) { listener: "tcp", errMsg: "canceled", }, + { + description: "successful ICMP parsing returns IP, port, and type code", + ctxTimeout: 500 * time.Millisecond, + conn: &mockRawConn{ + header: createMockIPv4Header(srcIP, dstIP, 1), + payload: createMockICMPPacket(createMockICMPLayer(layers.ICMPv4CodeTTLExceeded), createMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolTCP), createMockTCPLayer(12345, 443, 28394, 12737, true, true, true), false), + }, + localIP: innerSrcIP, + localPort: 12345, + remoteIP: innerDstIP, + remotePort: 443, + seqNum: 28394, + listener: "icmp", + expectedIP: srcIP, + expectedPort: 0, + expectedTypeCode: layers.ICMPv4CodeTTLExceeded, + }, + { + description: "successful TCP parsing returns IP, port, and type code", + ctxTimeout: 500 * time.Millisecond, + conn: &mockRawConn{ + header: createMockIPv4Header(dstIP, srcIP, 6), + payload: tcpBytes, + }, + localIP: srcIP, + localPort: 12345, + remoteIP: dstIP, + remotePort: 443, + seqNum: 28394, + listener: "tcp", + expectedIP: dstIP, + expectedPort: 443, + expectedTypeCode: 0, + }, } for _, test := range tt { t.Run(test.description, func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), test.ctxTimeout) defer cancel() - actualIP, actualPort, actualTypeCode, err := handlePackets(ctx, test.conn, test.listener, test.localIP, test.localPort, test.remoteIP, test.remotePort, test.seqNum) + actualIP, actualPort, actualTypeCode, _, err := handlePackets(ctx, test.conn, test.listener, test.localIP, test.localPort, test.remoteIP, test.remotePort, test.seqNum) if test.errMsg != "" { require.Error(t, err) assert.True(t, strings.Contains(err.Error(), test.errMsg)) @@ -142,8 +181,6 @@ func Test_handlePackets(t *testing.T) { } func Test_parseICMP(t *testing.T) { - innerSrcIP := net.ParseIP("10.0.0.1") - innerDstIP := net.ParseIP("192.168.1.1") ipv4Header := createMockIPv4Header(srcIP, dstIP, 1) icmpLayer := createMockICMPLayer(layers.ICMPv4CodeTTLExceeded) innerIPv4Layer := createMockIPv4Layer(innerSrcIP, innerDstIP, 
layers.IPProtocolTCP) diff --git a/pkg/networkpath/traceroute/traceroute_linux.go b/pkg/networkpath/traceroute/traceroute_linux.go index 1e4e0f10321cb..547f0fa2ff501 100644 --- a/pkg/networkpath/traceroute/traceroute_linux.go +++ b/pkg/networkpath/traceroute/traceroute_linux.go @@ -12,7 +12,7 @@ import ( "encoding/json" "github.com/DataDog/datadog-agent/comp/core/telemetry" - dd_config "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkpath/payload" "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -41,7 +41,7 @@ func New(cfg Config, _ telemetry.Component) (*LinuxTraceroute, error) { // Run executes a traceroute func (l *LinuxTraceroute) Run(_ context.Context) (payload.NetworkPath, error) { tu, err := net.GetRemoteSystemProbeUtil( - dd_config.SystemProbe().GetString("system_probe_config.sysprobe_socket")) + pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")) if err != nil { log.Warnf("could not initialize system-probe connection: %s", err.Error()) return payload.NetworkPath{}, err diff --git a/pkg/networkpath/traceroute/traceroute_windows.go b/pkg/networkpath/traceroute/traceroute_windows.go index f6e1702121b3b..089f46d216766 100644 --- a/pkg/networkpath/traceroute/traceroute_windows.go +++ b/pkg/networkpath/traceroute/traceroute_windows.go @@ -12,7 +12,7 @@ import ( "encoding/json" "github.com/DataDog/datadog-agent/comp/core/telemetry" - dd_config "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkpath/payload" "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -41,7 +41,7 @@ func New(cfg Config, _ telemetry.Component) (*WindowsTraceroute, error) { // Run executes a traceroute func (w *WindowsTraceroute) Run(_ context.Context) (payload.NetworkPath, error) { tu, err := net.GetRemoteSystemProbeUtil( - dd_config.SystemProbe().GetString("system_probe_config.sysprobe_socket")) + pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")) if err != nil { log.Warnf("could not initialize system-probe connection: %s", err.Error()) return payload.NetworkPath{}, err diff --git a/pkg/networkpath/traceroute/utils.go b/pkg/networkpath/traceroute/utils.go index 29ab3a1066019..de67843a8330a 100644 --- a/pkg/networkpath/traceroute/utils.go +++ b/pkg/networkpath/traceroute/utils.go @@ -15,17 +15,11 @@ import ( var lookupAddrFn = net.DefaultResolver.LookupAddr -// getDestinationHostname tries to convert the input destinationHost to hostname. -// When input destinationHost is an IP, a reverse DNS call is made to convert it into a hostname. 
-func getDestinationHostname(destinationHost string) string { - destIP := net.ParseIP(destinationHost) - if destIP != nil { - reverseDNSHostname := getHostname(destinationHost) - if reverseDNSHostname != "" { - return reverseDNSHostname - } +func getReverseDNSForIP(destIP net.IP) string { + if destIP == nil { + return "" } - return destinationHost + return getHostname(destIP.String()) } func getHostname(ipAddr string) string { diff --git a/pkg/networkpath/traceroute/utils_test.go b/pkg/networkpath/traceroute/utils_test.go index 48dd5b8b13492..a7f42d875ee7c 100644 --- a/pkg/networkpath/traceroute/utils_test.go +++ b/pkg/networkpath/traceroute/utils_test.go @@ -14,15 +14,15 @@ import ( "github.com/stretchr/testify/assert" ) -func Test_getDestinationHostname(t *testing.T) { +func Test_getReverseDnsForIP(t *testing.T) { t.Run("reverse dns lookup successful", func(t *testing.T) { lookupAddrFn = func(_ context.Context, _ string) ([]string, error) { return []string{"domain-a.com", "domain-b.com"}, nil } defer func() { lookupAddrFn = net.DefaultResolver.LookupAddr }() - assert.Equal(t, "domain-a.com", getDestinationHostname("1.2.3.4")) - assert.Equal(t, "not-an-ip", getDestinationHostname("not-an-ip")) + assert.Equal(t, "domain-a.com", getReverseDNSForIP(net.ParseIP("1.2.3.4"))) + assert.Equal(t, "", getReverseDNSForIP(nil)) }) t.Run("reverse dns lookup failure", func(t *testing.T) { lookupAddrFn = func(_ context.Context, _ string) ([]string, error) { @@ -30,8 +30,8 @@ func Test_getDestinationHostname(t *testing.T) { } defer func() { lookupAddrFn = net.DefaultResolver.LookupAddr }() - assert.Equal(t, "1.2.3.4", getDestinationHostname("1.2.3.4")) - assert.Equal(t, "not-an-ip", getDestinationHostname("not-an-ip")) + assert.Equal(t, "1.2.3.4", getReverseDNSForIP(net.ParseIP("1.2.3.4"))) + assert.Equal(t, "", getReverseDNSForIP(nil)) }) } diff --git a/pkg/obfuscate/go.mod b/pkg/obfuscate/go.mod index e25602108c4fb..0d0fb90ced497 100644 --- a/pkg/obfuscate/go.mod +++ b/pkg/obfuscate/go.mod @@ -4,7 +4,7 @@ go 1.22.0 require ( github.com/DataDog/datadog-go/v5 v5.5.0 - github.com/DataDog/go-sqllexer v0.0.14 + github.com/DataDog/go-sqllexer v0.0.15 github.com/outcaste-io/ristretto v0.2.1 github.com/stretchr/testify v1.9.0 go.uber.org/atomic v1.10.0 diff --git a/pkg/obfuscate/go.sum b/pkg/obfuscate/go.sum index e6d91aba3fc14..b62d4da45dfe3 100644 --- a/pkg/obfuscate/go.sum +++ b/pkg/obfuscate/go.sum @@ -1,7 +1,7 @@ github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q= -github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.15 h1:rUUu52dP8EQhJLnUw0MIAxZp0BQx2fOTuMztr3vtHUU= +github.com/DataDog/go-sqllexer v0.0.15/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= diff --git a/pkg/obfuscate/obfuscate.go b/pkg/obfuscate/obfuscate.go index fd5ac9ddd3f23..3032fb54f38a4 100644 --- a/pkg/obfuscate/obfuscate.go +++ b/pkg/obfuscate/obfuscate.go @@ -185,6 +185,11 @@ type SQLConfig struct { // This option is only valid when ObfuscationMode is "normalize_only" or 
"obfuscate_and_normalize". KeepIdentifierQuotation bool `json:"keep_identifier_quotation" yaml:"keep_identifier_quotation"` + // KeepJSONPath specifies whether to keep JSON paths following JSON operators in SQL statements in obfuscation. + // By default, JSON paths are treated as literals and are obfuscated to ?, e.g. "data::jsonb -> 'name'" -> "data::jsonb -> ?". + // This option is only valid when ObfuscationMode is "normalize_only" or "obfuscate_and_normalize". + KeepJSONPath bool `json:"keep_json_path" yaml:"keep_json_path"` + // Cache reports whether the obfuscator should use a LRU look-up cache for SQL obfuscations. Cache bool } diff --git a/pkg/obfuscate/sql.go b/pkg/obfuscate/sql.go index 18fc120a73b14..8674173c8fdc3 100644 --- a/pkg/obfuscate/sql.go +++ b/pkg/obfuscate/sql.go @@ -439,6 +439,7 @@ func (o *Obfuscator) ObfuscateWithSQLLexer(in string, opts *SQLConfig) (*Obfusca sqllexer.WithReplacePositionalParameter(!opts.KeepPositionalParameter), sqllexer.WithReplaceBoolean(!opts.KeepBoolean), sqllexer.WithReplaceNull(!opts.KeepNull), + sqllexer.WithKeepJsonPath(opts.KeepJSONPath), ) } diff --git a/pkg/obfuscate/sql_test.go b/pkg/obfuscate/sql_test.go index f6b7005c46342..db6d321be1c0f 100644 --- a/pkg/obfuscate/sql_test.go +++ b/pkg/obfuscate/sql_test.go @@ -756,6 +756,10 @@ func TestSQLTableFinderAndReplaceDigits(t *testing.T) { func TestSQLQuantizer(t *testing.T) { cases := []sqlTestCase{ + { + `SELECT "table"."field" FROM "table" WHERE "table"."otherfield" = $? AND "table"."thirdfield" = $?;`, + `SELECT table . field FROM table WHERE table . otherfield = ? AND table . thirdfield = ?`, + }, { "select * from users where id = 42", "select * from users where id = ?", @@ -2072,6 +2076,11 @@ func TestSQLLexerObfuscation(t *testing.T) { query: "SELECT * FROM users WHERE id = 1", expected: "SELECT * FROM users WHERE id = ?", }, + { + name: "dollar question paramerer", + query: `SELECT "table"."field" FROM "table" WHERE "table"."otherfield" = $? AND "table"."thirdfield" = $?;`, + expected: `SELECT "table"."field" FROM "table" WHERE "table"."otherfield" = $? AND "table"."thirdfield" = $?;`, + }, { name: "simple query obfuscation with replace digits", query: "SELECT * FROM users123 WHERE id = 1", @@ -2136,6 +2145,7 @@ func TestSQLLexerObfuscationAndNormalization(t *testing.T) { keepPositionalParameter bool keepTrailingSemicolon bool keepIdentifierQuotation bool + KeepJSONPath bool metadata SQLMetadata }{ { @@ -2417,6 +2427,50 @@ func TestSQLLexerObfuscationAndNormalization(t *testing.T) { Procedures: []string{}, }, }, + { + name: "select with json path not keep", + query: "SELECT * FROM users WHERE id = 1 AND name->'first' = 'test'", + expected: "SELECT * FROM users WHERE id = ? AND name -> ? = ?", + metadata: SQLMetadata{ + Size: 11, + TablesCSV: "users", + Commands: []string{ + "SELECT", + }, + Comments: []string{}, + Procedures: []string{}, + }, + }, + { + name: "select with json path ->", + query: "SELECT * FROM users WHERE id = 1 AND name->'first' = 'test'", + expected: "SELECT * FROM users WHERE id = ? AND name -> 'first' = ?", + KeepJSONPath: true, + metadata: SQLMetadata{ + Size: 11, + TablesCSV: "users", + Commands: []string{ + "SELECT", + }, + Comments: []string{}, + Procedures: []string{}, + }, + }, + { + name: "select with json path ->>", + query: "SELECT * FROM users WHERE id = 1 AND name->>2 = 'test'", + expected: "SELECT * FROM users WHERE id = ? 
AND name ->> 2 = ?", + KeepJSONPath: true, + metadata: SQLMetadata{ + Size: 11, + TablesCSV: "users", + Commands: []string{ + "SELECT", + }, + Comments: []string{}, + Procedures: []string{}, + }, + }, } for _, tt := range tests { @@ -2437,6 +2491,7 @@ func TestSQLLexerObfuscationAndNormalization(t *testing.T) { RemoveSpaceBetweenParentheses: tt.removeSpaceBetweenParentheses, KeepTrailingSemicolon: tt.keepTrailingSemicolon, KeepIdentifierQuotation: tt.keepIdentifierQuotation, + KeepJSONPath: tt.KeepJSONPath, }, }).ObfuscateSQLString(tt.query) require.NoError(t, err) diff --git a/pkg/obfuscate/sql_tokenizer.go b/pkg/obfuscate/sql_tokenizer.go index e379d7dde4a0e..190801d1dc722 100644 --- a/pkg/obfuscate/sql_tokenizer.go +++ b/pkg/obfuscate/sql_tokenizer.go @@ -473,10 +473,15 @@ func (tkn *SQLTokenizer) Scan() (TokenKind, []byte) { // modulo operator (e.g. 'id % 8') return TokenKind(ch), tkn.bytes() case '$': - if isDigit(tkn.lastChar) { - // TODO(gbbr): the first digit after $ does not necessarily guarantee - // that this isn't a dollar-quoted string constant. We might eventually - // want to cover for this use-case too (e.g. $1$some text$1$). + if isDigit(tkn.lastChar) || tkn.lastChar == '?' { + // TODO(knusbaum): Valid dollar quote tags start with alpha characters and contain no symbols. + // See: https://www.postgresql.org/docs/15/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS + // See also: https://pgpedia.info/d/dollar-quoting.html instead. + // + // Instances of $[integer] or $? are prepared statement variables. + // We may eventually want to expand this to check for symbols other than numbers and '?', + // since other symbols are not valid dollar quote tags, but for now this covers prepared statement + // variables without exposing us to more risk of not obfuscating something than necessary. return tkn.scanPreparedStatement('$') } @@ -678,11 +683,16 @@ func (tkn *SQLTokenizer) scanDollarQuotedString() (TokenKind, []byte) { func (tkn *SQLTokenizer) scanPreparedStatement(_ rune) (TokenKind, []byte) { // a prepared statement expect a digit identifier like $1 - if !isDigit(tkn.lastChar) { + if !isDigit(tkn.lastChar) && tkn.lastChar != '?' { tkn.setErr(`prepared statements must start with digits, got "%c" (%d)`, tkn.lastChar, tkn.lastChar) return LexError, tkn.bytes() } + if tkn.lastChar == '?' { + tkn.advance() + return PreparedStatement, tkn.bytes() + } + // scanNumber keeps the prefix rune intact. 
// read numbers and return an error if any token, buff := tkn.scanNumber(false) diff --git a/pkg/orchestrator/config/config.go b/pkg/orchestrator/config/config.go index 870b6e0e5ebf3..564de4d3abc1e 100644 --- a/pkg/orchestrator/config/config.go +++ b/pkg/orchestrator/config/config.go @@ -13,8 +13,8 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator/redact" apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config" @@ -86,8 +86,8 @@ func (oc *OrchestratorConfig) Load() error { } oc.OrchestratorEndpoints[0].Endpoint = URL - if key := "api_key"; config.Datadog().IsSet(key) { - oc.OrchestratorEndpoints[0].APIKey = utils.SanitizeAPIKey(config.Datadog().GetString(key)) + if key := "api_key"; pkgconfigsetup.Datadog().IsSet(key) { + oc.OrchestratorEndpoints[0].APIKey = utils.SanitizeAPIKey(pkgconfigsetup.Datadog().GetString(key)) } if err := extractOrchestratorAdditionalEndpoints(URL, &oc.OrchestratorEndpoints); err != nil { @@ -95,12 +95,12 @@ func (oc *OrchestratorConfig) Load() error { } // A custom word list to enhance the default one used by the DataScrubber - if k := OrchestratorNSKey("custom_sensitive_words"); config.Datadog().IsSet(k) { - oc.Scrubber.AddCustomSensitiveWords(config.Datadog().GetStringSlice(k)) + if k := OrchestratorNSKey("custom_sensitive_words"); pkgconfigsetup.Datadog().IsSet(k) { + oc.Scrubber.AddCustomSensitiveWords(pkgconfigsetup.Datadog().GetStringSlice(k)) } - if k := OrchestratorNSKey("custom_sensitive_annotations_labels"); config.Datadog().IsSet(k) { - redact.UpdateSensitiveAnnotationsAndLabels(config.Datadog().GetStringSlice(k)) + if k := OrchestratorNSKey("custom_sensitive_annotations_labels"); pkgconfigsetup.Datadog().IsSet(k) { + redact.UpdateSensitiveAnnotationsAndLabels(pkgconfigsetup.Datadog().GetStringSlice(k)) } // The maximum number of resources per message and the maximum message size. 
@@ -108,8 +108,8 @@ func (oc *OrchestratorConfig) Load() error { setBoundedConfigIntValue(OrchestratorNSKey("max_per_message"), maxMessageBatch, func(v int) { oc.MaxPerMessage = v }) setBoundedConfigIntValue(OrchestratorNSKey("max_message_bytes"), maxMessageSize, func(v int) { oc.MaxWeightPerMessageBytes = v }) - if k := key(processNS, "pod_queue_bytes"); config.Datadog().IsSet(k) { - if queueBytes := config.Datadog().GetInt(k); queueBytes > 0 { + if k := key(processNS, "pod_queue_bytes"); pkgconfigsetup.Datadog().IsSet(k) { + if queueBytes := pkgconfigsetup.Datadog().GetInt(k); queueBytes > 0 { oc.PodQueueBytes = queueBytes } } @@ -117,22 +117,22 @@ func (oc *OrchestratorConfig) Load() error { // Orchestrator Explorer oc.OrchestrationCollectionEnabled, oc.KubeClusterName = IsOrchestratorEnabled() - oc.CollectorDiscoveryEnabled = config.Datadog().GetBool(OrchestratorNSKey("collector_discovery.enabled")) - oc.IsScrubbingEnabled = config.Datadog().GetBool(OrchestratorNSKey("container_scrubbing.enabled")) - oc.ExtraTags = config.Datadog().GetStringSlice(OrchestratorNSKey("extra_tags")) - oc.IsManifestCollectionEnabled = config.Datadog().GetBool(OrchestratorNSKey("manifest_collection.enabled")) - oc.BufferedManifestEnabled = config.Datadog().GetBool(OrchestratorNSKey("manifest_collection.buffer_manifest")) - oc.ManifestBufferFlushInterval = config.Datadog().GetDuration(OrchestratorNSKey("manifest_collection.buffer_flush_interval")) + oc.CollectorDiscoveryEnabled = pkgconfigsetup.Datadog().GetBool(OrchestratorNSKey("collector_discovery.enabled")) + oc.IsScrubbingEnabled = pkgconfigsetup.Datadog().GetBool(OrchestratorNSKey("container_scrubbing.enabled")) + oc.ExtraTags = pkgconfigsetup.Datadog().GetStringSlice(OrchestratorNSKey("extra_tags")) + oc.IsManifestCollectionEnabled = pkgconfigsetup.Datadog().GetBool(OrchestratorNSKey("manifest_collection.enabled")) + oc.BufferedManifestEnabled = pkgconfigsetup.Datadog().GetBool(OrchestratorNSKey("manifest_collection.buffer_manifest")) + oc.ManifestBufferFlushInterval = pkgconfigsetup.Datadog().GetDuration(OrchestratorNSKey("manifest_collection.buffer_flush_interval")) return nil } func extractOrchestratorAdditionalEndpoints(URL *url.URL, orchestratorEndpoints *[]apicfg.Endpoint) error { - if k := OrchestratorNSKey("orchestrator_additional_endpoints"); config.Datadog().IsSet(k) { + if k := OrchestratorNSKey("orchestrator_additional_endpoints"); pkgconfigsetup.Datadog().IsSet(k) { if err := extractEndpoints(URL, k, orchestratorEndpoints); err != nil { return err } - } else if k := key(processNS, "orchestrator_additional_endpoints"); config.Datadog().IsSet(k) { + } else if k := key(processNS, "orchestrator_additional_endpoints"); pkgconfigsetup.Datadog().IsSet(k) { if err := extractEndpoints(URL, k, orchestratorEndpoints); err != nil { return err } @@ -141,7 +141,7 @@ func extractOrchestratorAdditionalEndpoints(URL *url.URL, orchestratorEndpoints } func extractEndpoints(URL *url.URL, k string, endpoints *[]apicfg.Endpoint) error { - for endpointURL, apiKeys := range config.Datadog().GetStringMapStringSlice(k) { + for endpointURL, apiKeys := range pkgconfigsetup.Datadog().GetStringMapStringSlice(k) { u, err := URL.Parse(endpointURL) if err != nil { return fmt.Errorf("invalid additional endpoint url '%s': %s", endpointURL, err) @@ -160,7 +160,7 @@ func extractEndpoints(URL *url.URL, k string, endpoints *[]apicfg.Endpoint) erro func extractOrchestratorDDUrl() (*url.URL, error) { orchestratorURL := OrchestratorNSKey("orchestrator_dd_url") processURL := 
key(processNS, "orchestrator_dd_url") - URL, err := url.Parse(utils.GetMainEndpointBackwardCompatible(config.Datadog(), "https://orchestrator.", orchestratorURL, processURL)) + URL, err := url.Parse(utils.GetMainEndpointBackwardCompatible(pkgconfigsetup.Datadog(), "https://orchestrator.", orchestratorURL, processURL)) if err != nil { return nil, fmt.Errorf("error parsing orchestrator_dd_url: %s", err) } @@ -168,11 +168,11 @@ func extractOrchestratorDDUrl() (*url.URL, error) { } func setBoundedConfigIntValue(configKey string, upperBound int, setter func(v int)) { - if !config.Datadog().IsSet(configKey) { + if !pkgconfigsetup.Datadog().IsSet(configKey) { return } - val := config.Datadog().GetInt(configKey) + val := pkgconfigsetup.Datadog().GetInt(configKey) if val <= 0 { pkglog.Warnf("Ignoring invalid value for setting %s (<=0)", configKey) @@ -188,7 +188,7 @@ func setBoundedConfigIntValue(configKey string, upperBound int, setter func(v in // IsOrchestratorEnabled checks if orchestrator explorer features are enabled, it returns the boolean and the cluster name func IsOrchestratorEnabled() (bool, string) { - enabled := config.Datadog().GetBool(OrchestratorNSKey("enabled")) + enabled := pkgconfigsetup.Datadog().GetBool(OrchestratorNSKey("enabled")) var clusterName string if enabled { // Set clustername @@ -200,11 +200,11 @@ func IsOrchestratorEnabled() (bool, string) { // IsOrchestratorECSExplorerEnabled checks if orchestrator ecs explorer features are enabled func IsOrchestratorECSExplorerEnabled() bool { - if !config.Datadog().GetBool(OrchestratorNSKey("enabled")) { + if !pkgconfigsetup.Datadog().GetBool(OrchestratorNSKey("enabled")) { return false } - if !config.Datadog().GetBool("ecs_task_collection_enabled") { + if !pkgconfigsetup.Datadog().GetBool("ecs_task_collection_enabled") { return false } diff --git a/pkg/orchestrator/redact/data_scrubber.go b/pkg/orchestrator/redact/data_scrubber.go index 2a2a60cacccd4..4650727302a28 100644 --- a/pkg/orchestrator/redact/data_scrubber.go +++ b/pkg/orchestrator/redact/data_scrubber.go @@ -107,7 +107,10 @@ func (ds *DataScrubber) ScrubSimpleCommand(cmdline []string) ([]string, bool) { } } - newCmdline := strings.Split(rawCmdline, " ") + // Regex tokenizes by capturing non-whitespace terms as tokens EX: "agent --secret" > ["agent", "--secret"] + // and non-whitespace terms followed by quotation enclosed subcomponents as tokens EX: "agent --pass="secret house"" > ["agent", "--pass="secret house""] + r := regexp.MustCompile(`([^\s"']+("([^"]*)")*('([^']*)')*)`) + newCmdline := r.FindAllString(rawCmdline, -1) // preprocess, without the preprocessing we would need to strip until whitespaces. // the first index can be skipped because it should be the program name. 
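The ScrubSimpleCommand change above swaps strings.Split(rawCmdline, " ") for a regex tokenizer, which is why the updated expectations in data_scrubber_test.go below no longer contain empty "" tokens and keep quoted values attached to their flag. A minimal standalone Go sketch of that tokenization behaviour (illustrative only; the command line used here is made up):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Same pattern as the new tokenizer in data_scrubber.go: a run of non-whitespace
	// characters, optionally followed by double- or single-quoted sub-segments.
	r := regexp.MustCompile(`([^\s"']+("([^"]*)")*('([^']*)')*)`)

	cmd := `process --password="Data Source another_password=12345"   --config=datadog.yaml`

	// strings.Split breaks the quoted value apart and emits empty tokens for the repeated spaces.
	fmt.Println(strings.Split(cmd, " "))
	// The regex keeps the quoted value attached to its flag and produces no empty tokens,
	// so the scrubber can mask the whole --password=... term in one piece.
	fmt.Println(r.FindAllString(cmd, -1))
}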
diff --git a/pkg/orchestrator/redact/data_scrubber_test.go b/pkg/orchestrator/redact/data_scrubber_test.go index 49a738b7634b6..9a9f63262a354 100644 --- a/pkg/orchestrator/redact/data_scrubber_test.go +++ b/pkg/orchestrator/redact/data_scrubber_test.go @@ -181,6 +181,8 @@ type testCase struct { func setupSensitiveCmdLines() []testCase { return []testCase{ // in case the "keyword" is part of the command itself + {[]string{"process --password=\"Data Source another_password=12345\""}, []string{"process", "--password=********"}}, + {[]string{"process --password:'Data Source another_pass=12345'"}, []string{"process", "--password:********"}}, {[]string{"agent", "-password////:123"}, []string{"agent", "-password////:********"}}, {[]string{"agent", "-password", "1234"}, []string{"agent", "-password", "********"}}, {[]string{"agent --password > /password/secret; agent --password echo >> /etc"}, []string{"agent", "--password", "********", "/password/secret;", "agent", "--password", "********", ">>", "/etc"}}, @@ -197,8 +199,8 @@ func setupSensitiveCmdLines() []testCase { {[]string{""}, []string{""}}, {[]string{"", ""}, []string{"", ""}}, // in case the "password" only consist of whitespaces we can assume that it is not something we need to mask - {[]string{"agent password "}, []string{"agent", "password", "", "", "", ""}}, - {[]string{"agent", "password", ""}, []string{"agent", "password", ""}}, + {[]string{"agent password "}, []string{"agent", "password"}}, + {[]string{"agent", "password", ""}, []string{"agent", "password"}}, {[]string{"agent", "password"}, []string{"agent", "password"}}, {[]string{"agent", "-password"}, []string{"agent", "-password"}}, {[]string{"agent -password"}, []string{"agent", "-password"}}, @@ -216,7 +218,7 @@ func setupSensitiveCmdLines() []testCase { {[]string{"agent", "-PASSWORD", "1234"}, []string{"agent", "-PASSWORD", "********"}}, {[]string{"agent", "--PASSword", "1234"}, []string{"agent", "--PASSword", "********"}}, {[]string{"agent", "--PaSsWoRd=1234"}, []string{"agent", "--PaSsWoRd=********"}}, - {[]string{"java -password 1234"}, []string{"java", "-password", "", "", "", "", "", "********"}}, + {[]string{"java -password 1234"}, []string{"java", "-password", "********"}}, {[]string{"process-agent --config=datadog.yaml --pid=process-agent.pid"}, []string{"process-agent", "--config=********", "--pid=********"}}, {[]string{"1-password --config=12345"}, []string{"1-password", "--config=********"}}, // not working {[]string{"java kafka password 1234"}, []string{"java", "kafka", "password", "********"}}, @@ -244,36 +246,35 @@ func setupCmdlinesWithWildCards() []testCase { {[]string{"spidly --befpass=2043 onebefpass 1234 --befpassCustom=1234"}, []string{"spidly", "--befpass=********", "onebefpass", "********", "--befpassCustom=1234"}}, {[]string{"spidly --befpass=2043 onebefpass 1234 --befpassCustom=1234"}, - []string{"spidly", "", "", "--befpass=********", "", "", "onebefpass", "", "", "********", "", "", "--befpassCustom=1234"}}, + []string{"spidly", "--befpass=********", "onebefpass", "********", "--befpassCustom=1234"}}, {[]string{"spidly", "--afterpass=2043", "afterpass_1", "1234", "--befafterpass_1=1234"}, []string{"spidly", "--afterpass=********", "afterpass_1", "********", "--befafterpass_1=1234"}}, {[]string{"spidly --afterpass=2043 afterpass_1 1234 --befafterpass_1=1234"}, []string{"spidly", "--afterpass=********", "afterpass_1", "********", "--befafterpass_1=1234"}}, {[]string{"spidly --afterpass=2043 afterpass_1 1234 --befafterpass_1=1234"}, - 
[]string{"spidly", "", "", "--afterpass=********", "", "", "afterpass_1", "", "", "********", "", "", "--befafterpass_1=1234"}}, + []string{"spidly", "--afterpass=********", "afterpass_1", "********", "--befafterpass_1=1234"}}, {[]string{"spidly", "both", "1234", "-dd_both", "1234", "bothafter", "1234", "--dd_bothafter=1234"}, []string{"spidly", "both", "********", "-dd_both", "********", "bothafter", "********", "--dd_bothafter=********"}}, {[]string{"spidly both 1234 -dd_both 1234 bothafter 1234 --dd_bothafter=1234"}, []string{"spidly", "both", "********", "-dd_both", "********", "bothafter", "********", "--dd_bothafter=********"}}, {[]string{"spidly both 1234 -dd_both 1234 bothafter 1234 --dd_bothafter=1234"}, - []string{"spidly", "", "", "both", "", "", "********", "", "", "-dd_both", "", "", "********", "", "", "bothafter", "", "", "********", "", "", "--dd_bothafter=********"}}, + []string{"spidly", "both", "********", "-dd_both", "********", "bothafter", "********", "--dd_bothafter=********"}}, {[]string{"spidly", "middle", "1234", "-mile", "1234", "--mill=1234"}, []string{"spidly", "middle", "********", "-mile", "********", "--mill=1234"}}, {[]string{"spidly middle 1234 -mile 1234 --mill=1234"}, []string{"spidly", "middle", "********", "-mile", "********", "--mill=1234"}}, {[]string{"spidly middle 1234 -mile 1234 --mill=1234"}, - []string{"spidly", "", "", "middle", "", "", "********", "", "", "-mile", "", "", "********", "", "", "--mill=1234"}}, + []string{"spidly", "middle", "********", "-mile", "********", "--mill=1234"}}, {[]string{"spidly", "--passwd=1234", "password", "1234", "-mypassword", "1234", "--passwords=12345,123456", "--mypasswords=1234,123456"}, []string{"spidly", "--passwd=********", "password", "********", "-mypassword", "********", "--passwords=********", "--mypasswords=********"}}, {[]string{"spidly --passwd=1234 password 1234 -mypassword 1234 --passwords=12345,123456 --mypasswords=1234,123456"}, []string{"spidly", "--passwd=********", "password", "********", "-mypassword", "********", "--passwords=********", "--mypasswords=********"}}, {[]string{"spidly --passwd=1234 password 1234 -mypassword 1234 --passwords=12345,123456 --mypasswords=1234,123456"}, - []string{"spidly", "", "", "--passwd=********", "", "", "password", "", "", "********", "", "", "-mypassword", "", "", "********", - "", "", "--passwords=********", "", "", "--mypasswords=********"}}, + []string{"spidly", "--passwd=********", "password", "********", "-mypassword", "********", "--passwords=********", "--mypasswords=********"}}, {[]string{"run-middle password 12345"}, []string{"run-middle", "password", "********"}}, {[]string{"generate-password -password 12345"}, []string{"generate-password", "-password", "********"}}, diff --git a/pkg/persistentcache/persistentcache.go b/pkg/persistentcache/persistentcache.go index c293a0b473fa8..8ff5092d436d0 100644 --- a/pkg/persistentcache/persistentcache.go +++ b/pkg/persistentcache/persistentcache.go @@ -12,7 +12,7 @@ import ( "regexp" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // Invalid characters to clean up @@ -22,7 +22,7 @@ var invalidChars = regexp.MustCompile("[^a-zA-Z0-9_-]") // first prefix as directory, if present. 
This is useful for integrations, which // use the check_id formed with $check_name:$hash func getFileForKey(key string) (string, error) { - parent := config.Datadog().GetString("run_path") + parent := pkgconfigsetup.Datadog().GetString("run_path") paths := strings.SplitN(key, ":", 2) cleanedPath := invalidChars.ReplaceAllString(paths[0], "") if len(paths) == 1 { diff --git a/pkg/process/checks/checks.go b/pkg/process/checks/checks.go index 139abb92720dd..ec06e082350a2 100644 --- a/pkg/process/checks/checks.go +++ b/pkg/process/checks/checks.go @@ -12,8 +12,8 @@ import ( sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/networkpath/npcollector" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -35,6 +35,8 @@ type SysProbeConfig struct { SystemProbeAddress string // System probe process module on/off configuration ProcessModuleEnabled bool + // System probe network_tracer module on/off configuration + NetworkTracerModuleEnabled bool } // Check is an interface for Agent checks that collect data. Each check returns @@ -105,7 +107,7 @@ func (p CombinedRunResult) RealtimePayloads() []model.MessageBody { // All is a list of all runnable checks. Putting a check in here does not guarantee it will be run, // it just guarantees that the collector will be able to find the check. // If you want to add a check you MUST register it here. -func All(config, sysprobeYamlCfg ddconfig.ReaderWriter, syscfg *sysconfigtypes.Config, wmeta workloadmeta.Component, npCollector npcollector.Component) []Check { +func All(config, sysprobeYamlCfg pkgconfigmodel.ReaderWriter, syscfg *sysconfigtypes.Config, wmeta workloadmeta.Component, npCollector npcollector.Component) []Check { return []Check{ NewProcessCheck(config, sysprobeYamlCfg, wmeta), NewContainerCheck(config, wmeta), @@ -128,7 +130,7 @@ func RTName(checkName string) string { } } -func canEnableContainerChecks(config ddconfig.Reader, displayFeatureWarning bool) bool { +func canEnableContainerChecks(config pkgconfigmodel.Reader, displayFeatureWarning bool) bool { // The process and container checks are mutually exclusive if config.GetBool("process_config.process_collection.enabled") { return false diff --git a/pkg/process/checks/config.go b/pkg/process/checks/config.go index 160c0cb1dff0b..225e5c3e55414 100644 --- a/pkg/process/checks/config.go +++ b/pkg/process/checks/config.go @@ -6,32 +6,33 @@ package checks import ( - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) // getMaxBatchSize returns the maximum number of items (processes, containers, process_discoveries) in a check payload -var getMaxBatchSize = func(config ddconfig.Reader) int { +var getMaxBatchSize = func(config model.Reader) int { return ensureValidMaxBatchSize(config.GetInt("process_config.max_per_message")) } func ensureValidMaxBatchSize(batchSize int) int { - if batchSize <= 0 || batchSize > ddconfig.ProcessMaxPerMessageLimit { - log.Warnf("Invalid max item count per message (%d), using default value of %d", batchSize, ddconfig.DefaultProcessMaxPerMessage) - return ddconfig.DefaultProcessMaxPerMessage 
+ if batchSize <= 0 || batchSize > pkgconfigsetup.ProcessMaxPerMessageLimit { + log.Warnf("Invalid max item count per message (%d), using default value of %d", batchSize, pkgconfigsetup.DefaultProcessMaxPerMessage) + return pkgconfigsetup.DefaultProcessMaxPerMessage } return batchSize } // getMaxBatchSize returns the maximum number of bytes in a check payload -var getMaxBatchBytes = func(config ddconfig.Reader) int { +var getMaxBatchBytes = func(config model.Reader) int { return ensureValidMaxBatchBytes(config.GetInt("process_config.max_message_bytes")) } func ensureValidMaxBatchBytes(batchBytes int) int { - if batchBytes <= 0 || batchBytes > ddconfig.ProcessMaxMessageBytesLimit { - log.Warnf("Invalid max byte size per message (%d), using default value of %d", batchBytes, ddconfig.DefaultProcessMaxMessageBytes) - return ddconfig.DefaultProcessMaxMessageBytes + if batchBytes <= 0 || batchBytes > pkgconfigsetup.ProcessMaxMessageBytesLimit { + log.Warnf("Invalid max byte size per message (%d), using default value of %d", batchBytes, pkgconfigsetup.DefaultProcessMaxMessageBytes) + return pkgconfigsetup.DefaultProcessMaxMessageBytes } return batchBytes } diff --git a/pkg/process/checks/config_test.go b/pkg/process/checks/config_test.go index d3a6b0fca6a2e..6f120d12da41d 100644 --- a/pkg/process/checks/config_test.go +++ b/pkg/process/checks/config_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestEnsureValidMaxBatchSize(t *testing.T) { @@ -33,17 +33,17 @@ func TestEnsureValidMaxBatchSize(t *testing.T) { { name: "invalid negative batch count", maxPerMessage: -1, - expectedMaxBatchSize: ddconfig.DefaultProcessMaxPerMessage, + expectedMaxBatchSize: pkgconfigsetup.DefaultProcessMaxPerMessage, }, { name: "invalid 0 max batch size", maxPerMessage: 0, - expectedMaxBatchSize: ddconfig.DefaultProcessMaxPerMessage, + expectedMaxBatchSize: pkgconfigsetup.DefaultProcessMaxPerMessage, }, { name: "invalid big max batch size", maxPerMessage: 20000, - expectedMaxBatchSize: ddconfig.DefaultProcessMaxPerMessage, + expectedMaxBatchSize: pkgconfigsetup.DefaultProcessMaxPerMessage, }, } @@ -74,17 +74,17 @@ func TestEnsureValidMaxBatchBytes(t *testing.T) { { name: "invalid negative batch size", maxMessageBytes: -1, - expectedMaxBatchBytes: ddconfig.DefaultProcessMaxMessageBytes, + expectedMaxBatchBytes: pkgconfigsetup.DefaultProcessMaxMessageBytes, }, { name: "invalid 0 max batch size", maxMessageBytes: 0, - expectedMaxBatchBytes: ddconfig.DefaultProcessMaxMessageBytes, + expectedMaxBatchBytes: pkgconfigsetup.DefaultProcessMaxMessageBytes, }, { name: "invalid big max batch size", maxMessageBytes: 20000000, - expectedMaxBatchBytes: ddconfig.DefaultProcessMaxMessageBytes, + expectedMaxBatchBytes: pkgconfigsetup.DefaultProcessMaxMessageBytes, }, } diff --git a/pkg/process/checks/container.go b/pkg/process/checks/container.go index de3e40fd00b43..590cb8f9ac359 100644 --- a/pkg/process/checks/container.go +++ b/pkg/process/checks/container.go @@ -6,7 +6,6 @@ package checks import ( - "context" "fmt" "math" "sync" @@ -15,10 +14,10 @@ import ( model "github.com/DataDog/agent-payload/v5/process" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + "github.com/DataDog/datadog-agent/pkg/process/net" 
"github.com/DataDog/datadog-agent/pkg/process/statsd" proccontainers "github.com/DataDog/datadog-agent/pkg/process/util/containers" - "github.com/DataDog/datadog-agent/pkg/util/cloudproviders" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -28,7 +27,7 @@ const ( ) // NewContainerCheck returns an instance of the ContainerCheck. -func NewContainerCheck(config ddconfig.Reader, wmeta workloadmeta.Component) *ContainerCheck { +func NewContainerCheck(config pkgconfigmodel.Reader, wmeta workloadmeta.Component) *ContainerCheck { return &ContainerCheck{ config: config, wmeta: wmeta, @@ -39,7 +38,7 @@ func NewContainerCheck(config ddconfig.Reader, wmeta workloadmeta.Component) *Co type ContainerCheck struct { sync.Mutex - config ddconfig.Reader + config pkgconfigmodel.Reader hostInfo *HostInfo containerProvider proccontainers.ContainerProvider @@ -53,11 +52,21 @@ type ContainerCheck struct { } // Init initializes a ContainerCheck instance. -func (c *ContainerCheck) Init(_ *SysProbeConfig, info *HostInfo, _ bool) error { +func (c *ContainerCheck) Init(syscfg *SysProbeConfig, info *HostInfo, _ bool) error { c.containerProvider = proccontainers.GetSharedContainerProvider(c.wmeta) c.hostInfo = info - networkID, err := cloudproviders.GetNetworkID(context.TODO()) + var tu *net.RemoteSysProbeUtil + var err error + if syscfg.NetworkTracerModuleEnabled { + // Calling the remote tracer will cause it to initialize and check connectivity + tu, err = net.GetRemoteSystemProbeUtil(syscfg.SystemProbeAddress) + if err != nil { + log.Warnf("could not initiate connection with system probe: %s", err) + } + } + + networkID, err := retryGetNetworkID(tu) if err != nil { log.Infof("no network ID detected: %s", err) } diff --git a/pkg/process/checks/container_rt.go b/pkg/process/checks/container_rt.go index 42a9634c6f5d8..452323de8e7a5 100644 --- a/pkg/process/checks/container_rt.go +++ b/pkg/process/checks/container_rt.go @@ -11,7 +11,7 @@ import ( model "github.com/DataDog/agent-payload/v5/process" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" proccontainers "github.com/DataDog/datadog-agent/pkg/process/util/containers" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -23,7 +23,7 @@ const ( ) // NewRTContainerCheck returns an instance of the RTContainerCheck. 
-func NewRTContainerCheck(config ddconfig.Reader, wmeta workloadmeta.Component) *RTContainerCheck { +func NewRTContainerCheck(config pkgconfigmodel.Reader, wmeta workloadmeta.Component) *RTContainerCheck { return &RTContainerCheck{ config: config, wmeta: wmeta, @@ -36,7 +36,7 @@ type RTContainerCheck struct { hostInfo *HostInfo containerProvider proccontainers.ContainerProvider lastRates map[string]*proccontainers.ContainerRateMetrics - config ddconfig.Reader + config pkgconfigmodel.Reader wmeta workloadmeta.Component } diff --git a/pkg/process/checks/enable_checks_containerized_test.go b/pkg/process/checks/enable_checks_containerized_test.go index aad1d013d9500..92111e391802d 100644 --- a/pkg/process/checks/enable_checks_containerized_test.go +++ b/pkg/process/checks/enable_checks_containerized_test.go @@ -18,7 +18,6 @@ import ( workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" "github.com/DataDog/datadog-agent/comp/networkpath/npcollector" "github.com/DataDog/datadog-agent/comp/networkpath/npcollector/npcollectorimpl" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/flavor" @@ -39,7 +38,7 @@ func TestContainerCheck(t *testing.T) { cfg.SetWithoutSource("process_config.process_collection.enabled", false) cfg.SetWithoutSource("process_config.container_collection.enabled", true) cfg.SetWithoutSource("process_config.disable_realtime_checks", false) - config.SetFeatures(t, env.Docker) + env.SetFeatures(t, env.Docker) enabledChecks := getEnabledChecks(t, cfg, configmock.NewSystemProbe(t), deps.WMeta, deps.NpCollector) assertContainsCheck(t, enabledChecks, ContainerCheckName) @@ -54,7 +53,7 @@ func TestContainerCheck(t *testing.T) { cfg.SetWithoutSource("process_config.process_collection.enabled", false) cfg.SetWithoutSource("process_config.container_collection.enabled", true) cfg.SetWithoutSource("process_config.disable_realtime_checks", true) - config.SetFeatures(t, env.Docker) + env.SetFeatures(t, env.Docker) enabledChecks := getEnabledChecks(t, cfg, configmock.NewSystemProbe(t), deps.WMeta, deps.NpCollector) assertContainsCheck(t, enabledChecks, ContainerCheckName) @@ -80,7 +79,7 @@ func TestContainerCheck(t *testing.T) { cfg := configmock.New(t) cfg.SetWithoutSource("process_config.process_collection.enabled", true) cfg.SetWithoutSource("process_config.container_collection.enabled", true) - config.SetFeatures(t, env.Docker) + env.SetFeatures(t, env.Docker) enabledChecks := getEnabledChecks(t, cfg, configmock.NewSystemProbe(t), deps.WMeta, deps.NpCollector) assertContainsCheck(t, enabledChecks, ProcessCheckName) @@ -96,7 +95,7 @@ func TestContainerCheck(t *testing.T) { cfg.SetWithoutSource("process_config.process_collection.enabled", false) cfg.SetWithoutSource("process_config.container_collection.enabled", true) cfg.SetWithoutSource("process_config.run_in_core_agent.enabled", true) - config.SetFeatures(t, env.Docker) + env.SetFeatures(t, env.Docker) flavor.SetFlavor("process_agent") enabledChecks := getEnabledChecks(t, cfg, scfg, deps.WMeta, deps.NpCollector) @@ -135,7 +134,7 @@ func TestDisableRealTime(t *testing.T) { mockConfig := configmock.New(t) mockConfig.SetWithoutSource("process_config.disable_realtime_checks", tc.disableRealtime) mockConfig.SetWithoutSource("process_config.process_discovery.enabled", false) // Not an RT check so we don't care - config.SetFeatures(t, env.Docker) + 
env.SetFeatures(t, env.Docker) enabledChecks := getEnabledChecks(t, mockConfig, configmock.NewSystemProbe(t), deps.WMeta, deps.NpCollector) assert.EqualValues(tc.expectedChecks, enabledChecks) diff --git a/pkg/process/checks/enabled_checks_test.go b/pkg/process/checks/enabled_checks_test.go index 7b4aaea81c38f..c6e4bd07659bb 100644 --- a/pkg/process/checks/enabled_checks_test.go +++ b/pkg/process/checks/enabled_checks_test.go @@ -19,8 +19,8 @@ import ( workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" "github.com/DataDog/datadog-agent/comp/networkpath/npcollector" "github.com/DataDog/datadog-agent/comp/networkpath/npcollector/npcollectorimpl" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -35,7 +35,7 @@ func assertNotContainsCheck(t *testing.T, checks []string, name string) { assert.NotContains(t, checks, name) } -func getEnabledChecks(t *testing.T, cfg, sysprobeYamlConfig config.ReaderWriter, wmeta workloadmeta.Component, npCollector npcollector.Component) []string { +func getEnabledChecks(t *testing.T, cfg, sysprobeYamlConfig pkgconfigmodel.ReaderWriter, wmeta workloadmeta.Component, npCollector npcollector.Component) []string { sysprobeConfigStruct, err := sysconfig.New("", "") require.NoError(t, err) diff --git a/pkg/process/checks/host_info.go b/pkg/process/checks/host_info.go index baaf70890e647..ba5c30e08341f 100644 --- a/pkg/process/checks/host_info.go +++ b/pkg/process/checks/host_info.go @@ -17,7 +17,8 @@ import ( model "github.com/DataDog/agent-payload/v5/process" "google.golang.org/grpc" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/fargate" "github.com/DataDog/datadog-agent/pkg/util/flavor" @@ -39,7 +40,7 @@ type HostInfo struct { } // CollectHostInfo collects host information -func CollectHostInfo(config config.Reader) (*HostInfo, error) { +func CollectHostInfo(config pkgconfigmodel.Reader) (*HostInfo, error) { sysInfo, err := CollectSystemInfo() if err != nil { return nil, err @@ -57,7 +58,7 @@ func CollectHostInfo(config config.Reader) (*HostInfo, error) { }, nil } -func resolveHostName(config config.Reader) (string, error) { +func resolveHostName(config pkgconfigmodel.Reader) (string, error) { // use the common agent hostname utility when not running in the process-agent if flavor.GetFlavor() != flavor.ProcessAgent { hostName, err := coreAgentGetHostname(context.TODO()) @@ -147,12 +148,12 @@ func getHostnameFromGRPC(ctx context.Context, grpcClientFn func(ctx context.Cont ctx, cancel := context.WithTimeout(ctx, grpcConnectionTimeout) defer cancel() - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return "", err } - ddAgentClient, err := grpcClientFn(ctx, ipcAddress, config.GetIPCPort()) + ddAgentClient, err := grpcClientFn(ctx, ipcAddress, pkgconfigsetup.GetIPCPort()) if err != nil { return "", fmt.Errorf("cannot connect to datadog agent via grpc: %w", err) } diff --git a/pkg/process/checks/host_info_test.go b/pkg/process/checks/host_info_test.go index ad19147d649b8..3dc57ffb98e15 
100644 --- a/pkg/process/checks/host_info_test.go +++ b/pkg/process/checks/host_info_test.go @@ -20,8 +20,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" pbmocks "github.com/DataDog/datadog-agent/pkg/proto/pbgo/mocks/core" "github.com/DataDog/datadog-agent/pkg/util/flavor" @@ -55,7 +55,7 @@ func TestGetHostnameFromGRPC(t *testing.T) { t.Run("hostname returns from grpc", func(t *testing.T) { hostname, err := getHostnameFromGRPC(ctx, func(_ context.Context, _, _ string, _ ...grpc.DialOption) (pb.AgentClient, error) { return mockClient, nil - }, config.DefaultGRPCConnectionTimeoutSecs*time.Second) + }, pkgconfigsetup.DefaultGRPCConnectionTimeoutSecs*time.Second) assert.Nil(t, err) assert.Equal(t, "unit-test-hostname", hostname) @@ -65,7 +65,7 @@ func TestGetHostnameFromGRPC(t *testing.T) { grpcErr := errors.New("no grpc client") hostname, err := getHostnameFromGRPC(ctx, func(_ context.Context, _, _ string, _ ...grpc.DialOption) (pb.AgentClient, error) { return nil, grpcErr - }, config.DefaultGRPCConnectionTimeoutSecs*time.Second) + }, pkgconfigsetup.DefaultGRPCConnectionTimeoutSecs*time.Second) assert.NotNil(t, err) assert.Equal(t, grpcErr, errors.Unwrap(err)) diff --git a/pkg/process/checks/interval.go b/pkg/process/checks/interval.go index b1f44ad6b9cd7..4e02b47ccd10a 100644 --- a/pkg/process/checks/interval.go +++ b/pkg/process/checks/interval.go @@ -8,7 +8,8 @@ package checks import ( "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -48,7 +49,7 @@ var ( RTContainerCheckName: RTContainerCheckDefaultInterval, ConnectionsCheckName: ConnectionsCheckDefaultInterval, DiscoveryCheckName: ProcessDiscoveryCheckDefaultInterval, - ProcessEventsCheckName: config.DefaultProcessEventsCheckInterval, + ProcessEventsCheckName: pkgconfigsetup.DefaultProcessEventsCheckInterval, } configKeys = map[string]string{ @@ -66,7 +67,7 @@ func GetDefaultInterval(checkName string) time.Duration { } // GetInterval returns the configured check interval value -func GetInterval(cfg config.Reader, checkName string) time.Duration { +func GetInterval(cfg pkgconfigmodel.Reader, checkName string) time.Duration { switch checkName { case DiscoveryCheckName: // We don't need to check if the key exists since we already bound it to a default in InitConfig. 
@@ -80,10 +81,10 @@ func GetInterval(cfg config.Reader, checkName string) time.Duration { case ProcessEventsCheckName: eventsInterval := cfg.GetDuration("process_config.event_collection.interval") - if eventsInterval < config.DefaultProcessEventsMinCheckInterval { - eventsInterval = config.DefaultProcessEventsCheckInterval + if eventsInterval < pkgconfigsetup.DefaultProcessEventsMinCheckInterval { + eventsInterval = pkgconfigsetup.DefaultProcessEventsCheckInterval _ = log.Warnf("Invalid interval for process_events check (< %s) using default value of %s", - config.DefaultProcessEventsMinCheckInterval.String(), config.DefaultProcessEventsCheckInterval.String()) + pkgconfigsetup.DefaultProcessEventsMinCheckInterval.String(), pkgconfigsetup.DefaultProcessEventsCheckInterval.String()) } return eventsInterval diff --git a/pkg/process/checks/interval_test.go b/pkg/process/checks/interval_test.go index 3cfc5a26bce97..ecb73709e6a26 100644 --- a/pkg/process/checks/interval_test.go +++ b/pkg/process/checks/interval_test.go @@ -11,8 +11,8 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestLegacyIntervalDefault(t *testing.T) { @@ -137,7 +137,7 @@ func TestProcessEventsInterval(t *testing.T) { { name: "below minimum", interval: 0, - expectedInterval: config.DefaultProcessEventsCheckInterval, + expectedInterval: pkgconfigsetup.DefaultProcessEventsCheckInterval, }, } { t.Run(tc.name, func(t *testing.T) { diff --git a/pkg/process/checks/net.go b/pkg/process/checks/net.go index 26e01d0677061..13b33d87f9dca 100644 --- a/pkg/process/checks/net.go +++ b/pkg/process/checks/net.go @@ -21,7 +21,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" hostMetadataUtils "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/utils" "github.com/DataDog/datadog-agent/comp/networkpath/npcollector" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/network/dns" "github.com/DataDog/datadog-agent/pkg/process/metadata/parser" "github.com/DataDog/datadog-agent/pkg/process/net" @@ -47,7 +47,7 @@ var ( ) // NewConnectionsCheck returns an instance of the ConnectionsCheck. -func NewConnectionsCheck(config, sysprobeYamlConfig config.Reader, syscfg *sysconfigtypes.Config, wmeta workloadmeta.Component, npCollector npcollector.Component) *ConnectionsCheck { +func NewConnectionsCheck(config, sysprobeYamlConfig pkgconfigmodel.Reader, syscfg *sysconfigtypes.Config, wmeta workloadmeta.Component, npCollector npcollector.Component) *ConnectionsCheck { return &ConnectionsCheck{ config: config, syscfg: syscfg, @@ -60,8 +60,8 @@ func NewConnectionsCheck(config, sysprobeYamlConfig config.Reader, syscfg *sysco // ConnectionsCheck collects statistics about live TCP and UDP connections. 
type ConnectionsCheck struct { syscfg *sysconfigtypes.Config - sysprobeYamlConfig config.Reader - config config.Reader + sysprobeYamlConfig pkgconfigmodel.Reader + config pkgconfigmodel.Reader hostInfo *HostInfo maxConnsPerMessage int @@ -107,7 +107,7 @@ func (c *ConnectionsCheck) Init(syscfg *SysProbeConfig, hostInfo *HostInfo, _ bo } } - networkID, err := cloudproviders.GetNetworkID(context.TODO()) + networkID, err := retryGetNetworkID(tu) if err != nil { log.Infof("no network ID detected: %s", err) } @@ -212,7 +212,7 @@ func (c *ConnectionsCheck) getConnections() (*model.Connections, error) { return tu.GetConnections(c.tracerClientID) } -func (c *ConnectionsCheck) notifyProcessConnRates(config config.Reader, conns *model.Connections) { +func (c *ConnectionsCheck) notifyProcessConnRates(config pkgconfigmodel.Reader, conns *model.Connections) { if len(c.processConnRatesTransmitter.Chs) == 0 { return } @@ -503,3 +503,17 @@ func convertAndEnrichWithServiceCtx(tags []string, tagOffsets []uint32, serviceC return tagsStr } + +// fetches network_id from the current netNS or from the system probe if necessary, where the root netNS is used +func retryGetNetworkID(sysProbeUtil *net.RemoteSysProbeUtil) (string, error) { + networkID, err := cloudproviders.GetNetworkID(context.TODO()) + if err != nil && sysProbeUtil != nil { + log.Infof("no network ID detected. retrying via system-probe: %s", err) + networkID, err = sysProbeUtil.GetNetworkID() + if err != nil { + log.Infof("failed to get network ID from system-probe: %s", err) + return "", err + } + } + return networkID, err +} diff --git a/pkg/process/checks/process.go b/pkg/process/checks/process.go index f35e71704d1cb..5d905a135702c 100644 --- a/pkg/process/checks/process.go +++ b/pkg/process/checks/process.go @@ -6,7 +6,6 @@ package checks import ( - "context" "errors" "fmt" "math" @@ -19,7 +18,8 @@ import ( "go.uber.org/atomic" workloadmetacomp "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/metadata" "github.com/DataDog/datadog-agent/pkg/process/metadata/parser" "github.com/DataDog/datadog-agent/pkg/process/metadata/workloadmeta" @@ -28,7 +28,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/process/statsd" "github.com/DataDog/datadog-agent/pkg/process/util" proccontainers "github.com/DataDog/datadog-agent/pkg/process/util/containers" - "github.com/DataDog/datadog-agent/pkg/util/cloudproviders" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/subscriptions" @@ -45,7 +44,7 @@ const ( ) // NewProcessCheck returns an instance of the ProcessCheck. -func NewProcessCheck(config ddconfig.Reader, sysprobeYamlConfig ddconfig.Reader, wmeta workloadmetacomp.Component) *ProcessCheck { +func NewProcessCheck(config pkgconfigmodel.Reader, sysprobeYamlConfig pkgconfigmodel.Reader, wmeta workloadmetacomp.Component) *ProcessCheck { serviceExtractorEnabled := true useWindowsServiceName := sysprobeYamlConfig.GetBool("system_probe_config.process_service_inference.use_windows_service_name") useImprovedAlgorithm := sysprobeYamlConfig.GetBool("system_probe_config.process_service_inference.use_improved_algorithm") @@ -71,7 +70,7 @@ const ( // for live and running processes. 
The instance will store some state between // checks that will be used for rates, cpu calculations, etc. type ProcessCheck struct { - config ddconfig.Reader + config pkgconfigmodel.Reader probe procutil.Probe // scrubber is a DataScrubber to hide command line sensitive words @@ -137,7 +136,17 @@ func (p *ProcessCheck) Init(syscfg *SysProbeConfig, info *HostInfo, oneShot bool p.notInitializedLogLimit = log.NewLogLimit(1, time.Minute*10) - networkID, err := cloudproviders.GetNetworkID(context.TODO()) + var tu *net.RemoteSysProbeUtil + var err error + if syscfg.NetworkTracerModuleEnabled { + // Calling the remote tracer will cause it to initialize and check connectivity + tu, err = net.GetRemoteSystemProbeUtil(syscfg.SystemProbeAddress) + if err != nil { + log.Warnf("could not initiate connection with system probe: %s", err) + } + } + + networkID, err := retryGetNetworkID(tu) if err != nil { log.Infof("no network ID detected: %s", err) } @@ -149,8 +158,8 @@ func (p *ProcessCheck) Init(syscfg *SysProbeConfig, info *HostInfo, oneShot bool p.skipAmount = uint32(p.config.GetInt32("process_config.process_discovery.hint_frequency")) if p.skipAmount == 0 { log.Warnf("process_config.process_discovery.hint_frequency must be greater than 0. using default value %d", - ddconfig.DefaultProcessDiscoveryHintFrequency) - p.skipAmount = ddconfig.DefaultProcessDiscoveryHintFrequency + pkgconfigsetup.DefaultProcessDiscoveryHintFrequency) + p.skipAmount = pkgconfigsetup.DefaultProcessDiscoveryHintFrequency } initScrubber(p.config, p.scrubber) @@ -164,7 +173,7 @@ func (p *ProcessCheck) Init(syscfg *SysProbeConfig, info *HostInfo, oneShot bool p.extractors = append(p.extractors, p.serviceExtractor) if !oneShot && workloadmeta.Enabled(p.config) { - p.workloadMetaExtractor = workloadmeta.GetSharedWorkloadMetaExtractor(ddconfig.SystemProbe()) + p.workloadMetaExtractor = workloadmeta.GetSharedWorkloadMetaExtractor(pkgconfigsetup.SystemProbe()) // The server is only needed on the process agent if !p.config.GetBool("process_config.run_in_core_agent.enabled") && flavor.GetFlavor() == flavor.ProcessAgent { @@ -681,7 +690,7 @@ func mergeProcWithSysprobeStats(pids []int32, procs map[int32]*procutil.Process, } } -func initScrubber(config ddconfig.Reader, scrubber *procutil.DataScrubber) { +func initScrubber(config pkgconfigmodel.Reader, scrubber *procutil.DataScrubber) { // Enable/Disable the DataScrubber to obfuscate process args if config.IsSet(configScrubArgs) { scrubber.Enabled = config.GetBool(configScrubArgs) @@ -705,7 +714,7 @@ func initScrubber(config ddconfig.Reader, scrubber *procutil.DataScrubber) { } } -func initDisallowList(config ddconfig.Reader) []*regexp.Regexp { +func initDisallowList(config pkgconfigmodel.Reader) []*regexp.Regexp { var disallowList []*regexp.Regexp // A list of regex patterns that will exclude a process if matched. 
if config.IsSet(configDisallowList) { diff --git a/pkg/process/checks/process_data.go b/pkg/process/checks/process_data.go index bda9c947f37dc..90585934ff05c 100644 --- a/pkg/process/checks/process_data.go +++ b/pkg/process/checks/process_data.go @@ -8,7 +8,7 @@ package checks import ( "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/metadata" "github.com/DataDog/datadog-agent/pkg/process/procutil" ) @@ -22,7 +22,7 @@ type ProcessData struct { } // NewProcessData returns a new ProcessData from the given config -func NewProcessData(cfg config.Reader) *ProcessData { +func NewProcessData(cfg pkgconfigmodel.Reader) *ProcessData { return &ProcessData{ probe: newProcessProbe(cfg), } diff --git a/pkg/process/checks/process_discovery_check.go b/pkg/process/checks/process_discovery_check.go index c5a3b65069b70..967d29dc8f62a 100644 --- a/pkg/process/checks/process_discovery_check.go +++ b/pkg/process/checks/process_discovery_check.go @@ -9,8 +9,8 @@ import ( "fmt" "time" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -20,7 +20,7 @@ import ( ) // NewProcessDiscoveryCheck returns an instance of the ProcessDiscoveryCheck. -func NewProcessDiscoveryCheck(config ddconfig.Reader) *ProcessDiscoveryCheck { +func NewProcessDiscoveryCheck(config pkgconfigmodel.Reader) *ProcessDiscoveryCheck { return &ProcessDiscoveryCheck{ config: config, scrubber: procutil.NewDefaultDataScrubber(), @@ -32,7 +32,7 @@ func NewProcessDiscoveryCheck(config ddconfig.Reader) *ProcessDiscoveryCheck { // It uses its own ProcessDiscovery payload. // The goal of this check is to collect information about possible integrations that may be enabled by the end user. 
type ProcessDiscoveryCheck struct { - config ddconfig.Reader + config pkgconfigmodel.Reader probe procutil.Probe scrubber *procutil.DataScrubber diff --git a/pkg/process/checks/process_discovery_check_test.go b/pkg/process/checks/process_discovery_check_test.go index d20dac12f0bfd..233b863753f43 100644 --- a/pkg/process/checks/process_discovery_check_test.go +++ b/pkg/process/checks/process_discovery_check_test.go @@ -14,8 +14,8 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/procutil" "github.com/DataDog/datadog-agent/pkg/process/procutil/mocks" ) @@ -50,7 +50,7 @@ func TestProcessDiscoveryCheck(t *testing.T) { }() maxBatchSize := 10 - getMaxBatchSize = func(config.Reader) int { return maxBatchSize } + getMaxBatchSize = func(pkgconfigmodel.Reader) int { return maxBatchSize } check := NewProcessDiscoveryCheck(configmock.New(t)) check.Init( diff --git a/pkg/process/checks/process_events_fallback.go b/pkg/process/checks/process_events_fallback.go index 4b1f864357228..b49b54378843d 100644 --- a/pkg/process/checks/process_events_fallback.go +++ b/pkg/process/checks/process_events_fallback.go @@ -10,11 +10,11 @@ package checks import ( "errors" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" ) // NewProcessEventsCheck returns an instance of the ProcessEventsCheck. -func NewProcessEventsCheck(config config.Reader) *ProcessEventsCheck { +func NewProcessEventsCheck(config pkgconfigmodel.Reader) *ProcessEventsCheck { return &ProcessEventsCheck{ config: config, } @@ -22,7 +22,7 @@ func NewProcessEventsCheck(config config.Reader) *ProcessEventsCheck { // ProcessEventsCheck collects process lifecycle events such as exec and exit signals type ProcessEventsCheck struct { - config config.Reader + config pkgconfigmodel.Reader } // Init initializes the ProcessEventsCheck. diff --git a/pkg/process/checks/process_events_linux.go b/pkg/process/checks/process_events_linux.go index 0c9154457d73b..630b31f383112 100644 --- a/pkg/process/checks/process_events_linux.go +++ b/pkg/process/checks/process_events_linux.go @@ -16,7 +16,7 @@ import ( payload "github.com/DataDog/agent-payload/v5/process" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/events" "github.com/DataDog/datadog-agent/pkg/process/events/model" "github.com/DataDog/datadog-agent/pkg/process/statsd" @@ -24,7 +24,7 @@ import ( ) // NewProcessEventsCheck returns an instance of the ProcessEventsCheck. 
-func NewProcessEventsCheck(config ddconfig.Reader) *ProcessEventsCheck { +func NewProcessEventsCheck(config pkgconfigmodel.Reader) *ProcessEventsCheck { return &ProcessEventsCheck{ config: config, } @@ -34,7 +34,7 @@ func NewProcessEventsCheck(config ddconfig.Reader) *ProcessEventsCheck { type ProcessEventsCheck struct { initMutex sync.Mutex - config ddconfig.Reader + config pkgconfigmodel.Reader store events.Store listener *events.SysProbeListener diff --git a/pkg/process/checks/process_probe.go b/pkg/process/checks/process_probe.go index d77f622259590..2f4f94272ba01 100644 --- a/pkg/process/checks/process_probe.go +++ b/pkg/process/checks/process_probe.go @@ -8,10 +8,10 @@ package checks import ( - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/procutil" ) -func newProcessProbe(_ config.Reader, options ...procutil.Option) procutil.Probe { +func newProcessProbe(_ pkgconfigmodel.Reader, options ...procutil.Option) procutil.Probe { return procutil.NewProcessProbe(options...) } diff --git a/pkg/process/checks/process_probe_windows.go b/pkg/process/checks/process_probe_windows.go index 25bc83e7c259b..b8e3e29720935 100644 --- a/pkg/process/checks/process_probe_windows.go +++ b/pkg/process/checks/process_probe_windows.go @@ -6,12 +6,12 @@ package checks import ( - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/procutil" "github.com/DataDog/datadog-agent/pkg/util/log" ) -func newProcessProbe(config config.Reader, options ...procutil.Option) procutil.Probe { +func newProcessProbe(config pkgconfigmodel.Reader, options ...procutil.Option) procutil.Probe { if !config.GetBool("process_config.windows.use_perf_counters") { log.Info("Using toolhelp API probe for process data collection") return procutil.NewWindowsToolhelpProbe() diff --git a/pkg/process/checks/user_nix.go b/pkg/process/checks/user_nix.go index 7e99159c48ad3..e7412aeade221 100644 --- a/pkg/process/checks/user_nix.go +++ b/pkg/process/checks/user_nix.go @@ -13,13 +13,13 @@ import ( "github.com/patrickmn/go-cache" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" ) //nolint:revive // TODO(PROC) Fix revive linter type LookupIdProbe struct { - config config.Reader + config pkgconfigmodel.Reader lookupIdCache *cache.Cache //nolint:revive // TODO(PROC) Fix revive linter @@ -27,7 +27,7 @@ type LookupIdProbe struct { } // NewLookupIDProbe returns a new LookupIdProbe from the config -func NewLookupIDProbe(coreConfig config.Reader) *LookupIdProbe { +func NewLookupIDProbe(coreConfig pkgconfigmodel.Reader) *LookupIdProbe { if coreConfig.GetBool("process_config.cache_lookupid") { log.Debug("Using cached calls to `user.LookupID`") } diff --git a/pkg/process/checks/user_windows.go b/pkg/process/checks/user_windows.go index b50dd2ceae7c9..6b5196a3fea3a 100644 --- a/pkg/process/checks/user_windows.go +++ b/pkg/process/checks/user_windows.go @@ -8,7 +8,7 @@ package checks import ( - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" ) // On Windows the LookupIdProbe does nothing since we get the user info from the process itself. 
@@ -19,6 +19,6 @@ type LookupIdProbe struct{} // NewLookupIDProbe returns a new LookupIdProbe // //nolint:revive // TODO(PROC) Fix revive linter -func NewLookupIDProbe(config.Reader) *LookupIdProbe { +func NewLookupIDProbe(pkgconfigmodel.Reader) *LookupIdProbe { return &LookupIdProbe{} } diff --git a/pkg/process/events/listener_linux.go b/pkg/process/events/listener_linux.go index 787886091151f..4a59d29f3bec6 100644 --- a/pkg/process/events/listener_linux.go +++ b/pkg/process/events/listener_linux.go @@ -20,7 +20,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/eventmonitor/proto/api" "github.com/DataDog/datadog-agent/pkg/process/events/model" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -45,7 +45,7 @@ type SysProbeListener struct { // NewListener returns a new SysProbeListener to listen for process events func NewListener(handler EventHandler) (*SysProbeListener, error) { - socketPath := ddconfig.SystemProbe().GetString("event_monitoring_config.socket") + socketPath := pkgconfigsetup.SystemProbe().GetString("event_monitoring_config.socket") if socketPath == "" { return nil, errors.New("event_monitoring_config.socket must be set") } diff --git a/pkg/process/events/store.go b/pkg/process/events/store.go index 62145898d19b0..4d6767ac31e3c 100644 --- a/pkg/process/events/store.go +++ b/pkg/process/events/store.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-go/v5/statsd" "go.uber.org/atomic" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/events/model" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -80,7 +80,7 @@ type RingStore struct { } // readPositiveInt reads a config stored in the given key and asserts that it's a positive value -func readPositiveInt(cfg config.Reader, key string) (int, error) { +func readPositiveInt(cfg pkgconfigmodel.Reader, key string) (int, error) { i := cfg.GetInt(key) if i <= 0 { return 0, fmt.Errorf("invalid setting. 
%s must be > 0", key) @@ -90,7 +90,7 @@ func readPositiveInt(cfg config.Reader, key string) (int, error) { } // NewRingStore creates a new RingStore to store process events -func NewRingStore(cfg config.Reader, client statsd.ClientInterface) (Store, error) { +func NewRingStore(cfg pkgconfigmodel.Reader, client statsd.ClientInterface) (Store, error) { maxItems, err := readPositiveInt(cfg, "process_config.event_collection.store.max_items") if err != nil { return nil, err diff --git a/pkg/process/metadata/parser/service_windows_test.go b/pkg/process/metadata/parser/service_windows_test.go index fa49194059515..b1d748df5e4d1 100644 --- a/pkg/process/metadata/parser/service_windows_test.go +++ b/pkg/process/metadata/parser/service_windows_test.go @@ -13,8 +13,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/procutil" "github.com/DataDog/datadog-agent/pkg/util/winutil" ) @@ -90,7 +90,7 @@ func TestWindowsExtractServiceMetadata(t *testing.T) { } func TestWindowsExtractServiceWithSCMReader(t *testing.T) { - makeServiceExtractor := func(t *testing.T, sysprobeConfig ddconfig.Reader) (*ServiceExtractor, *mockSCM) { + makeServiceExtractor := func(t *testing.T, sysprobeConfig pkgconfigmodel.Reader) (*ServiceExtractor, *mockSCM) { enabled := sysprobeConfig.GetBool("system_probe_config.process_service_inference.enabled") useWindowsServiceName := sysprobeConfig.GetBool("system_probe_config.process_service_inference.use_windows_service_name") useImprovedAlgorithm := sysprobeConfig.GetBool("system_probe_config.process_service_inference.use_improved_algorithm") diff --git a/pkg/process/metadata/workloadmeta/collector/process.go b/pkg/process/metadata/workloadmeta/collector/process.go index 76801fdb71a79..bc9d9ea14ea0c 100644 --- a/pkg/process/metadata/workloadmeta/collector/process.go +++ b/pkg/process/metadata/workloadmeta/collector/process.go @@ -13,7 +13,7 @@ import ( "github.com/benbjohnson/clock" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/checks" workloadmetaExtractor "github.com/DataDog/datadog-agent/pkg/process/metadata/workloadmeta" proccontainers "github.com/DataDog/datadog-agent/pkg/process/util/containers" @@ -26,7 +26,7 @@ const ( ) // NewProcessCollector creates a new process collector. -func NewProcessCollector(coreConfig, sysProbeConfig config.Reader) *Collector { +func NewProcessCollector(coreConfig, sysProbeConfig pkgconfigmodel.Reader) *Collector { wlmExtractor := workloadmetaExtractor.NewWorkloadMetaExtractor(sysProbeConfig) processData := checks.NewProcessData(coreConfig) @@ -45,7 +45,7 @@ func NewProcessCollector(coreConfig, sysProbeConfig config.Reader) *Collector { // Collector collects processes to send to the remote process collector in the core agent. // It is only intended to be used when language detection is enabled, and the process check is disabled. 
type Collector struct { - ddConfig config.Reader + ddConfig pkgconfigmodel.Reader processData *checks.ProcessData @@ -106,7 +106,7 @@ func (c *Collector) run(ctx context.Context, containerProvider proccontainers.Co // Additionally, if the remote process collector is not enabled in the core agent, there is no reason to collect processes. Therefore, we check `language_detection.enabled`. // We also check `process_config.run_in_core_agent.enabled` because this collector should only be used when the core agent collector is not running. // Finally, we only want to run this collector in the process agent, so if we're running as anything else we should disable the collector. -func Enabled(cfg config.Reader) bool { +func Enabled(cfg pkgconfigmodel.Reader) bool { if cfg.GetBool("process_config.process_collection.enabled") { return false } diff --git a/pkg/process/metadata/workloadmeta/extractor.go b/pkg/process/metadata/workloadmeta/extractor.go index 429c092bd5642..25db230e57263 100644 --- a/pkg/process/metadata/workloadmeta/extractor.go +++ b/pkg/process/metadata/workloadmeta/extractor.go @@ -11,7 +11,7 @@ import ( "strconv" "sync" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/languagedetection" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" "github.com/DataDog/datadog-agent/pkg/process/procutil" @@ -51,7 +51,7 @@ type WorkloadMetaExtractor struct { pidToCid map[int]string - sysprobeConfig config.Reader + sysprobeConfig pkgconfigmodel.Reader } // ProcessCacheDiff holds the information about processes that have been created and deleted in the past @@ -72,7 +72,7 @@ var ( ) // GetSharedWorkloadMetaExtractor returns a shared WorkloadMetaExtractor -func GetSharedWorkloadMetaExtractor(sysprobeConfig config.Reader) *WorkloadMetaExtractor { +func GetSharedWorkloadMetaExtractor(sysprobeConfig pkgconfigmodel.Reader) *WorkloadMetaExtractor { initWorkloadMetaExtractor.Do(func() { sharedWorkloadMetaExtractor = NewWorkloadMetaExtractor(sysprobeConfig) }) @@ -80,7 +80,7 @@ func GetSharedWorkloadMetaExtractor(sysprobeConfig config.Reader) *WorkloadMetaE } // NewWorkloadMetaExtractor constructs the WorkloadMetaExtractor. 
-func NewWorkloadMetaExtractor(sysprobeConfig config.Reader) *WorkloadMetaExtractor { +func NewWorkloadMetaExtractor(sysprobeConfig pkgconfigmodel.Reader) *WorkloadMetaExtractor { log.Info("Instantiating a new WorkloadMetaExtractor") return &WorkloadMetaExtractor{ @@ -197,7 +197,7 @@ func getDifference(oldCache, newCache map[string]*ProcessEntity) []*ProcessEntit } // Enabled returns whether the extractor should be enabled -func Enabled(ddconfig config.Reader) bool { +func Enabled(ddconfig pkgconfigmodel.Reader) bool { enabled := ddconfig.GetBool("language_detection.enabled") if enabled && runtime.GOOS == "darwin" { log.Warn("Language detection is not supported on macOS") diff --git a/pkg/process/metadata/workloadmeta/grpc.go b/pkg/process/metadata/workloadmeta/grpc.go index 28592954005d6..ab4c947bd5d69 100644 --- a/pkg/process/metadata/workloadmeta/grpc.go +++ b/pkg/process/metadata/workloadmeta/grpc.go @@ -18,7 +18,8 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/telemetry" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" @@ -32,7 +33,7 @@ var DuplicateConnectionErr = errors.New("the stream was closed because another c // GRPCServer implements a gRPC server to expose Process Entities collected with a WorkloadMetaExtractor type GRPCServer struct { - config config.Reader + config pkgconfigmodel.Reader extractor *WorkloadMetaExtractor server *grpc.Server // The address of the server set by start(). Primarily used for testing. May be nil if start() has not been called. 
@@ -53,7 +54,7 @@ var ( ) // NewGRPCServer creates a new instance of a GRPCServer -func NewGRPCServer(config config.Reader, extractor *WorkloadMetaExtractor) *GRPCServer { +func NewGRPCServer(config pkgconfigmodel.Reader, extractor *WorkloadMetaExtractor) *GRPCServer { l := &GRPCServer{ config: config, extractor: extractor, @@ -201,8 +202,8 @@ func (l *GRPCServer) StreamEntities(_ *pbgo.ProcessStreamEntitiesRequest, out pb } // getListener returns a listening connection -func getListener(cfg config.Reader) (net.Listener, error) { - host, err := config.GetIPCAddress() +func getListener(cfg pkgconfigmodel.Reader) (net.Listener, error) { + host, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } @@ -211,11 +212,11 @@ func getListener(cfg config.Reader) (net.Listener, error) { return net.Listen("tcp", address) } -func getGRPCStreamPort(cfg config.Reader) int { +func getGRPCStreamPort(cfg pkgconfigmodel.Reader) int { grpcPort := cfg.GetInt("process_config.language_detection.grpc_port") if grpcPort <= 0 { - log.Warnf("Invalid process_config.language_detection.grpc_port -- %d, using default port %d", grpcPort, config.DefaultProcessEntityStreamPort) - grpcPort = config.DefaultProcessEntityStreamPort + log.Warnf("Invalid process_config.language_detection.grpc_port -- %d, using default port %d", grpcPort, pkgconfigsetup.DefaultProcessEntityStreamPort) + grpcPort = pkgconfigsetup.DefaultProcessEntityStreamPort } return grpcPort } diff --git a/pkg/process/metadata/workloadmeta/grpc_test.go b/pkg/process/metadata/workloadmeta/grpc_test.go index 8788abf4c098b..7f9b7bccd87a3 100644 --- a/pkg/process/metadata/workloadmeta/grpc_test.go +++ b/pkg/process/metadata/workloadmeta/grpc_test.go @@ -19,8 +19,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" "github.com/DataDog/datadog-agent/pkg/process/procutil" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" @@ -33,7 +33,7 @@ func TestGetGRPCStreamPort(t *testing.T) { cfg := configmock.New(t) cfg.SetWithoutSource("process_config.language_detection.grpc_port", "lorem ipsum") - assert.Equal(t, config.DefaultProcessEntityStreamPort, getGRPCStreamPort(cfg)) + assert.Equal(t, pkgconfigsetup.DefaultProcessEntityStreamPort, getGRPCStreamPort(cfg)) }) t.Run("valid port", func(t *testing.T) { @@ -45,7 +45,7 @@ func TestGetGRPCStreamPort(t *testing.T) { t.Run("default", func(t *testing.T) { cfg := configmock.New(t) - assert.Equal(t, config.DefaultProcessEntityStreamPort, getGRPCStreamPort(cfg)) + assert.Equal(t, pkgconfigsetup.DefaultProcessEntityStreamPort, getGRPCStreamPort(cfg)) }) } diff --git a/pkg/process/net/common.go b/pkg/process/net/common.go index a9b7a64430143..eb13384a656d3 100644 --- a/pkg/process/net/common.go +++ b/pkg/process/net/common.go @@ -44,6 +44,7 @@ type Conn interface { const ( contentTypeProtobuf = "application/protobuf" + contentTypeJSON = "application/json" ) var ( @@ -166,6 +167,32 @@ func (r *RemoteSysProbeUtil) GetConnections(clientID string) (*model.Connections return conns, nil } +// GetNetworkID fetches the network_id (vpc_id) from system-probe +func (r *RemoteSysProbeUtil) GetNetworkID() (string, error) { + req, err := http.NewRequest("GET", 
networkIDURL, nil) + if err != nil { + return "", fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Set("Accept", "text/plain") + resp, err := r.httpClient.Do(req) + if err != nil { + return "", fmt.Errorf("failed to execute request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("network_id request failed: url: %s, status code: %d", networkIDURL, resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("failed to read response body: %w", err) + } + + return string(body), nil +} + // GetPing returns the results of a ping to a host func (r *RemoteSysProbeUtil) GetPing(clientID string, host string, count int, interval time.Duration, timeout time.Duration) ([]byte, error) { req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s?client_id=%s&count=%d&interval=%d&timeout=%d", pingURL, host, clientID, count, interval, timeout), nil) @@ -173,7 +200,7 @@ func (r *RemoteSysProbeUtil) GetPing(clientID string, host string, count int, in return nil, err } - req.Header.Set("Accept", "application/json") + req.Header.Set("Accept", contentTypeJSON) resp, err := r.httpClient.Do(req) if err != nil { return nil, err @@ -200,7 +227,9 @@ func (r *RemoteSysProbeUtil) GetPing(clientID string, host string, count int, in // GetTraceroute returns the results of a traceroute to a host func (r *RemoteSysProbeUtil) GetTraceroute(clientID string, host string, port uint16, protocol nppayload.Protocol, maxTTL uint8, timeout time.Duration) ([]byte, error) { - ctx, cancel := context.WithTimeout(context.Background(), timeout+10*time.Second) // allow extra time for the system probe communication overhead + httpTimeout := timeout*time.Duration(maxTTL) + 10*time.Second // allow extra time for the system probe communication overhead, calculate full timeout for TCP traceroute + log.Tracef("Network Path traceroute HTTP request timeout: %s", httpTimeout) + ctx, cancel := context.WithTimeout(context.Background(), httpTimeout) defer cancel() req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/%s?client_id=%s&port=%d&max_ttl=%d&timeout=%d&protocol=%s", tracerouteURL, host, clientID, port, maxTTL, timeout, protocol), nil) @@ -208,7 +237,7 @@ func (r *RemoteSysProbeUtil) GetTraceroute(clientID string, host string, port ui return nil, err } - req.Header.Set("Accept", "application/json") + req.Header.Set("Accept", contentTypeJSON) resp, err := r.tracerouteClient.Do(req) if err != nil { return nil, err @@ -283,41 +312,6 @@ func (r *RemoteSysProbeUtil) Register(clientID string) error { return nil } -func newSystemProbe(path string) *RemoteSysProbeUtil { - return &RemoteSysProbeUtil{ - path: path, - httpClient: http.Client{ - Timeout: 10 * time.Second, - Transport: &http.Transport{ - MaxIdleConns: 2, - IdleConnTimeout: 30 * time.Second, - DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial(netType, path) - }, - TLSHandshakeTimeout: 1 * time.Second, - ResponseHeaderTimeout: 5 * time.Second, - ExpectContinueTimeout: 50 * time.Millisecond, - }, - }, - pprofClient: http.Client{ - Transport: &http.Transport{ - DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial(netType, path) - }, - }, - }, - tracerouteClient: http.Client{ - // no timeout set here, the expected usage of this client - // is that the caller will set a timeout on each request - Transport: &http.Transport{ - DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { - 
return net.Dial(netType, path) - }, - }, - }, - } -} - //nolint:revive // TODO(PROC) Fix revive linter func (r *RemoteSysProbeUtil) DetectLanguage(pids []int32) ([]languagemodels.Language, error) { procs := make([]*languagepb.Process, len(pids)) diff --git a/pkg/process/net/common_linux.go b/pkg/process/net/common_linux.go index 2dc5c7db28c8d..218137d1374a0 100644 --- a/pkg/process/net/common_linux.go +++ b/pkg/process/net/common_linux.go @@ -8,8 +8,12 @@ package net import ( + "context" "fmt" + "net" + "net/http" "os" + "time" sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" ) @@ -18,6 +22,7 @@ const ( pingURL = "http://unix/" + string(sysconfig.PingModule) + "/ping/" tracerouteURL = "http://unix/" + string(sysconfig.TracerouteModule) + "/traceroute/" connectionsURL = "http://unix/" + string(sysconfig.NetworkTracerModule) + "/connections" + networkIDURL = "http://unix/" + string(sysconfig.NetworkTracerModule) + "/network_id" procStatsURL = "http://unix/" + string(sysconfig.ProcessModule) + "/stats" registerURL = "http://unix/" + string(sysconfig.NetworkTracerModule) + "/register" statsURL = "http://unix/debug/stats" @@ -39,3 +44,39 @@ func CheckPath(path string) error { } return nil } + +// newSystemProbe creates a group of clients to interact with system-probe. +func newSystemProbe(path string) *RemoteSysProbeUtil { + return &RemoteSysProbeUtil{ + path: path, + httpClient: http.Client{ + Timeout: 10 * time.Second, + Transport: &http.Transport{ + MaxIdleConns: 2, + IdleConnTimeout: 30 * time.Second, + DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial(netType, path) + }, + TLSHandshakeTimeout: 1 * time.Second, + ResponseHeaderTimeout: 5 * time.Second, + ExpectContinueTimeout: 50 * time.Millisecond, + }, + }, + pprofClient: http.Client{ + Transport: &http.Transport{ + DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial(netType, path) + }, + }, + }, + tracerouteClient: http.Client{ + // no timeout set here, the expected usage of this client + // is that the caller will set a timeout on each request + Transport: &http.Transport{ + DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial(netType, path) + }, + }, + }, + } +} diff --git a/pkg/process/net/common_unsupported.go b/pkg/process/net/common_unsupported.go index 03a481a2de400..ebdea5968e5bb 100644 --- a/pkg/process/net/common_unsupported.go +++ b/pkg/process/net/common_unsupported.go @@ -40,6 +40,11 @@ func (r *RemoteSysProbeUtil) GetConnections(_ string) (*model.Connections, error return nil, ErrNotImplemented } +// GetNetworkID is not supported +func (r *RemoteSysProbeUtil) GetNetworkID() (string, error) { + return "", ErrNotImplemented +} + // GetStats is not supported func (r *RemoteSysProbeUtil) GetStats() (map[string]interface{}, error) { return nil, ErrNotImplemented diff --git a/pkg/process/net/common_windows.go b/pkg/process/net/common_windows.go index 4ad0d218e65f5..2afd2ddfa003f 100644 --- a/pkg/process/net/common_windows.go +++ b/pkg/process/net/common_windows.go @@ -8,13 +8,18 @@ package net import ( + "context" "fmt" + "net" + "net/http" + "time" sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" ) const ( connectionsURL = "http://localhost:3333/" + string(sysconfig.NetworkTracerModule) + "/connections" + networkIDURL = "http://unix/" + string(sysconfig.NetworkTracerModule) + "/network_id" registerURL = "http://localhost:3333/" + string(sysconfig.NetworkTracerModule) + "/register" 
languageDetectionURL = "http://localhost:3333/" + string(sysconfig.LanguageDetectionModule) + "/detect" statsURL = "http://localhost:3333/debug/stats" @@ -28,6 +33,17 @@ const ( procStatsURL = "http://localhost:3333/" + string(sysconfig.ProcessModule) + "stats" // pingURL is not used in windows, the value is added to avoid compilation error in windows pingURL = "http://localhost:3333/" + string(sysconfig.PingModule) + "/ping/" + + // SystemProbePipeName is the production named pipe for system probe + SystemProbePipeName = `\\.\pipe\dd_system_probe` + + // systemProbeMaxIdleConns sets the maximum number of idle named pipe connections. + systemProbeMaxIdleConns = 2 + + // systemProbeIdleConnTimeout is the time a named pipe connection is held up idle before being closed. + // This should be small since connections are local, to close them as soon as they are done, + // and to quickly service new pending connections. + systemProbeIdleConnTimeout = 5 * time.Second ) // CheckPath is used to make sure the globalSocketPath has been set before attempting to connect @@ -37,3 +53,48 @@ func CheckPath(path string) error { } return nil } + +// NewSystemProbeClient returns a http client configured to talk to the system-probe +func NewSystemProbeClient() *http.Client { + return &http.Client{ + Timeout: 10 * time.Second, + Transport: &http.Transport{ + MaxIdleConns: systemProbeMaxIdleConns, + IdleConnTimeout: systemProbeIdleConnTimeout, + DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { + return DialSystemProbe() + }, + TLSHandshakeTimeout: 1 * time.Second, + ResponseHeaderTimeout: 2 * time.Second, + ExpectContinueTimeout: 50 * time.Millisecond, + }, + } +} + +// newSystemProbe creates a group of clients to interact with system-probe. +func newSystemProbe(path string) *RemoteSysProbeUtil { + return &RemoteSysProbeUtil{ + path: path, + httpClient: *NewSystemProbeClient(), + pprofClient: http.Client{ + Transport: &http.Transport{ + MaxIdleConns: systemProbeMaxIdleConns, + IdleConnTimeout: systemProbeIdleConnTimeout, + DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { + return DialSystemProbe() + }, + }, + }, + tracerouteClient: http.Client{ + // no timeout set here, the expected usage of this client + // is that the caller will set a timeout on each request + Transport: &http.Transport{ + MaxIdleConns: systemProbeMaxIdleConns, + IdleConnTimeout: systemProbeIdleConnTimeout, + DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { + return DialSystemProbe() + }, + }, + }, + } +} diff --git a/pkg/process/net/mocks/sys_probe_util.go b/pkg/process/net/mocks/sys_probe_util.go index 3bf0b2c1d7270..0d0af5300fa4f 100644 --- a/pkg/process/net/mocks/sys_probe_util.go +++ b/pkg/process/net/mocks/sys_probe_util.go @@ -43,6 +43,34 @@ func (_m *SysProbeUtil) GetConnections(clientID string) (*process.Connections, e return r0, r1 } +// GetNetworkID provides a mock function with given fields: +func (_m *SysProbeUtil) GetNetworkID() (string, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetNetworkID") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func() (string, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetProcStats provides a mock function with given fields: pids func (_m *SysProbeUtil) 
GetProcStats(pids []int32) (*process.ProcStatsWithPermByPID, error) { ret := _m.Called(pids) diff --git a/pkg/process/net/shared.go b/pkg/process/net/shared.go index 72a6e418865c6..a0a7aa18ae327 100644 --- a/pkg/process/net/shared.go +++ b/pkg/process/net/shared.go @@ -13,4 +13,5 @@ type SysProbeUtil interface { GetStats() (map[string]interface{}, error) GetProcStats(pids []int32) (*model.ProcStatsWithPermByPID, error) Register(clientID string) error + GetNetworkID() (string, error) } diff --git a/pkg/process/net/uds.go b/pkg/process/net/uds.go index bfbbb4ed3b796..85c46b70354c5 100644 --- a/pkg/process/net/uds.go +++ b/pkg/process/net/uds.go @@ -22,8 +22,8 @@ type UDSListener struct { socketPath string } -// NewListener returns an idle UDSListener -func NewListener(socketAddr string) (*UDSListener, error) { +// newSocketListener creates a Unix Domain Socket Listener +func newSocketListener(socketAddr string) (*UDSListener, error) { if len(socketAddr) == 0 { return nil, fmt.Errorf("uds: empty socket path provided") } @@ -73,6 +73,16 @@ func NewListener(socketAddr string) (*UDSListener, error) { return listener, nil } +// NewSystemProbeListener returns an idle UDSListener +func NewSystemProbeListener(socketAddr string) (*UDSListener, error) { + var listener, err = newSocketListener(socketAddr) + if err != nil { + return nil, fmt.Errorf("error creating IPC socket: %s", err) + } + + return listener, err +} + // GetListener will return the underlying Conn's net.Listener func (l *UDSListener) GetListener() net.Listener { return l.conn diff --git a/pkg/process/net/uds_test.go b/pkg/process/net/uds_test.go index 3e8290966bc41..51affd4815d5f 100644 --- a/pkg/process/net/uds_test.go +++ b/pkg/process/net/uds_test.go @@ -25,7 +25,7 @@ func testSocketExistsNewUDSListener(t *testing.T, socketPath string) { assert.NoError(t, err) // Create a new socket using UDSListener - l, err := NewListener(socketPath) + l, err := NewSystemProbeListener(socketPath) require.NoError(t, err) l.Stop() @@ -38,12 +38,12 @@ func testSocketExistsAsRegularFileNewUDSListener(t *testing.T, socketPath string defer f.Close() // Create a new socket using UDSListener - _, err = NewListener(socketPath) + _, err = NewSystemProbeListener(socketPath) require.Error(t, err) } func testWorkingNewUDSListener(t *testing.T, socketPath string) { - s, err := NewListener(socketPath) + s, err := NewSystemProbeListener(socketPath) require.NoError(t, err) defer s.Stop() diff --git a/pkg/process/net/windows_pipe.go b/pkg/process/net/windows_pipe.go index c3969b81fd54d..2d2baea60532a 100644 --- a/pkg/process/net/windows_pipe.go +++ b/pkg/process/net/windows_pipe.go @@ -8,7 +8,26 @@ package net import ( + "fmt" "net" + "time" + + "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/Microsoft/go-winio" +) + +const ( + // Buffer sizes for the system probe named pipe. + // The sizes are advisory, Windows can adjust them, but should be small enough to preserve + // the nonpaged pool. + namedPipeInputBufferSize = int32(4096) + namedPipeOutputBufferSize = int32(4096) + + // DACL for the system probe named pipe. + // SE_DACL_PROTECTED (P), SE_DACL_AUTO_INHERITED (AI) + // Allow Everyone (WD) + // nolint:revive // TODO: Hardened DACL and ensure the datadogagent run-as user is allowed. 
+ namedPipeSecurityDescriptor = "D:PAI(A;;FA;;;WD)" ) // WindowsPipeListener for communicating with Probe @@ -17,10 +36,36 @@ type WindowsPipeListener struct { pipePath string } -// NewListener sets up a TCP listener for now, will eventually be a named pipe -func NewListener(socketAddr string) (*WindowsPipeListener, error) { - l, err := net.Listen("tcp", socketAddr) - return &WindowsPipeListener{l, "path"}, err +// systemProbePipeName is the effective named pipe path for system probe +var systemProbePipeName = SystemProbePipeName + +// systemProbePipSecurityDescriptor has the effective DACL for the system probe named pipe. +var systemProbePipSecurityDescriptor = namedPipeSecurityDescriptor + +// newPipeListener creates a standardized named pipe server and with hardened ACL +func newPipeListener(namedPipeName string) (net.Listener, error) { + // The DACL must allow the run-as user of datadogagent. + config := winio.PipeConfig{ + SecurityDescriptor: systemProbePipSecurityDescriptor, + InputBufferSize: namedPipeInputBufferSize, + OutputBufferSize: namedPipeOutputBufferSize, + } + + // winio specifies virtually unlimited number of named pipe instances but is limited by + // the nonpaged pool. + return winio.ListenPipe(namedPipeName, &config) +} + +// NewSystemProbeListener sets up a named pipe listener for the system probe service. +func NewSystemProbeListener(_ string) (*WindowsPipeListener, error) { + // socketAddr not used + + namedPipe, err := newPipeListener(systemProbePipeName) + if err != nil { + return nil, fmt.Errorf("error named pipe %s : %s", systemProbePipeName, err) + } + + return &WindowsPipeListener{namedPipe, systemProbePipeName}, nil } // GetListener will return underlying Listener's conn @@ -32,3 +77,22 @@ func (wp *WindowsPipeListener) GetListener() net.Listener { func (wp *WindowsPipeListener) Stop() { wp.conn.Close() } + +// DialSystemProbe connects to the system-probe service endpoint +func DialSystemProbe() (net.Conn, error) { + // Go clients do not immediately close (named pipe) connections when done, + // they keep connections idle for a while. Make sure the idle time + // is not too high and the timeout is generous enough for pending connections. + var timeout = time.Duration(30 * time.Second) + + namedPipe, err := winio.DialPipe(systemProbePipeName, &timeout) + if err != nil { + // This important error may not get reported upstream, making connection failures + // very difficult to diagnose. Explicitly log the error here too for diagnostics. + var namedPipeErr = fmt.Errorf("error connecting to named pipe %s : %s", systemProbePipeName, err) + log.Errorf("%s", namedPipeErr.Error()) + return nil, namedPipeErr + } + + return namedPipe, nil +} diff --git a/pkg/process/net/windows_pipe_testutil.go b/pkg/process/net/windows_pipe_testutil.go new file mode 100644 index 0000000000000..c0656efeb2903 --- /dev/null +++ b/pkg/process/net/windows_pipe_testutil.go @@ -0,0 +1,21 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build test && windows + +package net + +// OverrideSystemProbeNamedPipeConfig sets the active named pipe path and its DACL for +// System Probe connections. +// This is used by tests only to avoid conflicts with an existing locally installed Datadog agent. 
+func OverrideSystemProbeNamedPipeConfig(path string, securityDescriptor string) { + if path != "" { + systemProbePipeName = path + } + + if securityDescriptor != "" { + systemProbePipSecurityDescriptor = securityDescriptor + } +} diff --git a/pkg/process/procutil/data_scrubber.go b/pkg/process/procutil/data_scrubber.go index 18c3faf62bad1..4728b0b91dcf2 100644 --- a/pkg/process/procutil/data_scrubber.go +++ b/pkg/process/procutil/data_scrubber.go @@ -102,7 +102,7 @@ func CompileStringsToRegex(words []string) []DataScrubberPattern { continue } - pattern := "(?P( +| -{1,2})(?i)" + enhancedWord.String() + ")(?P +|=|:)(?P[^\\s]*)" + pattern := "(?P( +| -{1,2})(?i)" + enhancedWord.String() + ")(?P +|=|:)(?P(\"([^\"]*)\")|('([^']*)')|[^\\s]*)" r, err := regexp.Compile(pattern) if err == nil { compiledRegexps = append(compiledRegexps, DataScrubberPattern{ diff --git a/pkg/process/procutil/data_scrubber_test.go b/pkg/process/procutil/data_scrubber_test.go index 7c043ecd1d3ed..980b0805c45f0 100644 --- a/pkg/process/procutil/data_scrubber_test.go +++ b/pkg/process/procutil/data_scrubber_test.go @@ -62,6 +62,9 @@ type testProcess struct { func setupSensitiveCmdlines() []testCase { return []testCase{ + {[]string{"process --password=\"Data Source another_password=12345\""}, []string{"process", "--password=********"}}, + {[]string{"process --password=\"Data Source\""}, []string{"process", "--password=********"}}, + {[]string{"process --password:'Data Source another_pass=12345'"}, []string{"process", "--password:********"}}, {[]string{"agent", "password", "-token", "1234"}, []string{"agent", "password", "********", "1234"}}, {[]string{"agent", "-password", "1234"}, []string{"agent", "-password", "********"}}, {[]string{"agent --password > /password/secret; agent --password echo >> /etc"}, []string{"agent", "--password", "********", "/password/secret;", "agent", "--password", "********", ">>", "/etc"}}, diff --git a/pkg/process/runner/collector_api_test.go b/pkg/process/runner/collector_api_test.go index 7c72976dfe59e..257b07e00d659 100644 --- a/pkg/process/runner/collector_api_test.go +++ b/pkg/process/runner/collector_api_test.go @@ -18,8 +18,8 @@ import ( "github.com/DataDog/agent-payload/v5/process" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/checks" "github.com/DataDog/datadog-agent/pkg/process/runner/endpoint" apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config" @@ -33,7 +33,7 @@ import ( const testHostName = "test-host" -func setProcessEndpointsForTest(config ddconfig.Config, eps ...apicfg.Endpoint) { +func setProcessEndpointsForTest(config pkgconfigmodel.Config, eps ...apicfg.Endpoint) { additionalEps := make(map[string][]string) for i, ep := range eps { if i == 0 { @@ -46,7 +46,7 @@ func setProcessEndpointsForTest(config ddconfig.Config, eps ...apicfg.Endpoint) config.SetWithoutSource("process_config.additional_endpoints", additionalEps) } -func setProcessEventsEndpointsForTest(config ddconfig.Config, eps ...apicfg.Endpoint) { +func setProcessEventsEndpointsForTest(config pkgconfigmodel.Config, eps ...apicfg.Endpoint) { additionalEps := make(map[string][]string) for i, ep := range eps { if i == 0 { @@ -432,11 +432,11 @@ func TestMultipleAPIKeys(t *testing.T) { }) } -func runCollectorTest(t *testing.T, check checks.Check, epConfig *endpointConfig, mockConfig ddconfig.Config, tc func(c 
*CheckRunner, ep *mockEndpoint)) { +func runCollectorTest(t *testing.T, check checks.Check, epConfig *endpointConfig, mockConfig pkgconfigmodel.Config, tc func(c *CheckRunner, ep *mockEndpoint)) { runCollectorTestWithAPIKeys(t, check, epConfig, []string{"apiKey"}, mockConfig, tc) } -func runCollectorTestWithAPIKeys(t *testing.T, check checks.Check, epConfig *endpointConfig, apiKeys []string, mockConfig ddconfig.Config, tc func(c *CheckRunner, ep *mockEndpoint)) { +func runCollectorTestWithAPIKeys(t *testing.T, check checks.Check, epConfig *endpointConfig, apiKeys []string, mockConfig pkgconfigmodel.Config, tc func(c *CheckRunner, ep *mockEndpoint)) { ep := newMockEndpoint(t, epConfig) collectorAddr, eventsAddr := ep.start() defer ep.stop() diff --git a/pkg/process/runner/endpoint/endpoints.go b/pkg/process/runner/endpoint/endpoints.go index f85c2e929574c..7aaaf6b276d9c 100644 --- a/pkg/process/runner/endpoint/endpoints.go +++ b/pkg/process/runner/endpoint/endpoints.go @@ -10,24 +10,25 @@ import ( "fmt" "net/url" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config" ) // GetAPIEndpoints returns the list of api endpoints from the config -func GetAPIEndpoints(config ddconfig.Reader) (eps []apicfg.Endpoint, err error) { +func GetAPIEndpoints(config pkgconfigmodel.Reader) (eps []apicfg.Endpoint, err error) { return getAPIEndpointsWithKeys(config, "https://process.", "process_config.process_dd_url", "process_config.additional_endpoints") } // GetEventsAPIEndpoints returns the list of api event endpoints from the config -func GetEventsAPIEndpoints(config ddconfig.Reader) (eps []apicfg.Endpoint, err error) { +func GetEventsAPIEndpoints(config pkgconfigmodel.Reader) (eps []apicfg.Endpoint, err error) { return getAPIEndpointsWithKeys(config, "https://process-events.", "process_config.events_dd_url", "process_config.events_additional_endpoints") } -func getAPIEndpointsWithKeys(config ddconfig.Reader, prefix, defaultEpKey, additionalEpsKey string) (eps []apicfg.Endpoint, err error) { +func getAPIEndpointsWithKeys(config pkgconfigmodel.Reader, prefix, defaultEpKey, additionalEpsKey string) (eps []apicfg.Endpoint, err error) { // Setup main endpoint - mainEndpointURL, err := url.Parse(utils.GetMainEndpoint(ddconfig.Datadog(), prefix, defaultEpKey)) + mainEndpointURL, err := url.Parse(utils.GetMainEndpoint(pkgconfigsetup.Datadog(), prefix, defaultEpKey)) if err != nil { return nil, fmt.Errorf("error parsing %s: %s", defaultEpKey, err) } diff --git a/pkg/process/runner/endpoints_test.go b/pkg/process/runner/endpoints_test.go index 779551e949541..bfdfe816772ce 100644 --- a/pkg/process/runner/endpoints_test.go +++ b/pkg/process/runner/endpoints_test.go @@ -11,8 +11,8 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/runner/endpoint" apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config" ) @@ -38,7 +38,7 @@ func TestGetAPIEndpoints(t *testing.T) { expected: []apicfg.Endpoint{ { APIKey: "test", - Endpoint: mkurl(config.DefaultProcessEndpoint), + Endpoint: mkurl(pkgconfigsetup.DefaultProcessEndpoint), }, }, }, @@ 
-63,7 +63,7 @@ func TestGetAPIEndpoints(t *testing.T) { }, expected: []apicfg.Endpoint{ { - Endpoint: mkurl(config.DefaultProcessEndpoint), + Endpoint: mkurl(pkgconfigsetup.DefaultProcessEndpoint), APIKey: "test", }, { @@ -183,13 +183,13 @@ func TestGetConcurrentAPIEndpoints(t *testing.T) { expectedEndpoints: []apicfg.Endpoint{ { APIKey: "test", - Endpoint: mkurl(config.DefaultProcessEndpoint), + Endpoint: mkurl(pkgconfigsetup.DefaultProcessEndpoint), }, }, expectedEventsEndpoints: []apicfg.Endpoint{ { APIKey: "test", - Endpoint: mkurl(config.DefaultProcessEventsEndpoint), + Endpoint: mkurl(pkgconfigsetup.DefaultProcessEventsEndpoint), }, }, }, @@ -206,7 +206,7 @@ func TestGetConcurrentAPIEndpoints(t *testing.T) { expectedEventsEndpoints: []apicfg.Endpoint{ { APIKey: "test", - Endpoint: mkurl(config.DefaultProcessEventsEndpoint), + Endpoint: mkurl(pkgconfigsetup.DefaultProcessEventsEndpoint), }, }, }, @@ -217,7 +217,7 @@ func TestGetConcurrentAPIEndpoints(t *testing.T) { expectedEndpoints: []apicfg.Endpoint{ { APIKey: "test", - Endpoint: mkurl(config.DefaultProcessEndpoint), + Endpoint: mkurl(pkgconfigsetup.DefaultProcessEndpoint), }, }, expectedEventsEndpoints: []apicfg.Endpoint{ @@ -249,7 +249,7 @@ func TestGetConcurrentAPIEndpoints(t *testing.T) { }, expectedEndpoints: []apicfg.Endpoint{ { - Endpoint: mkurl(config.DefaultProcessEndpoint), + Endpoint: mkurl(pkgconfigsetup.DefaultProcessEndpoint), APIKey: "test", }, { @@ -267,7 +267,7 @@ func TestGetConcurrentAPIEndpoints(t *testing.T) { }, expectedEventsEndpoints: []apicfg.Endpoint{ { - Endpoint: mkurl(config.DefaultProcessEventsEndpoint), + Endpoint: mkurl(pkgconfigsetup.DefaultProcessEventsEndpoint), APIKey: "test", }, { diff --git a/pkg/process/runner/runner.go b/pkg/process/runner/runner.go index c1dd778c270d8..ba5cda0bc8fa0 100644 --- a/pkg/process/runner/runner.go +++ b/pkg/process/runner/runner.go @@ -20,7 +20,7 @@ import ( sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/process/types" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" oconfig "github.com/DataDog/datadog-agent/pkg/orchestrator/config" "github.com/DataDog/datadog-agent/pkg/process/checks" "github.com/DataDog/datadog-agent/pkg/process/status" @@ -54,7 +54,7 @@ type Runner interface{} // CheckRunner will collect metrics from the local system and ship to the backend. 
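// Editor's aside (illustrative sketch, not part of the patch): GetAPIEndpoints and
// GetEventsAPIEndpoints now take the narrower pkgconfigmodel.Reader, so a test can feed them the
// config mock directly, the same way setProcessEndpointsForTest does above. The URLs, API keys,
// and package name below are hypothetical; the config keys are the ones used in the patch.

package runnerexample // hypothetical package name for this fragment

import (
	"testing"

	configmock "github.com/DataDog/datadog-agent/pkg/config/mock"
	"github.com/DataDog/datadog-agent/pkg/process/runner/endpoint"
)

func exampleEndpointsFromMockConfig(t *testing.T) {
	cfg := configmock.New(t)
	cfg.SetWithoutSource("api_key", "main-key")                                          // hypothetical key
	cfg.SetWithoutSource("process_config.process_dd_url", "https://process.example.com") // hypothetical URL
	cfg.SetWithoutSource("process_config.additional_endpoints", map[string][]string{
		"https://process-backup.example.com": {"backup-key"}, // hypothetical additional intake
	})

	eps, err := endpoint.GetAPIEndpoints(cfg)
	if err != nil {
		t.Fatal(err)
	}
	// The main endpoint comes first, followed by any additional endpoints.
	for _, ep := range eps {
		t.Logf("endpoint=%v api key set=%t", ep.Endpoint, ep.APIKey != "")
	}
}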
type CheckRunner struct { - config ddconfig.Reader + config pkgconfigmodel.Reader sysProbeCfg *checks.SysProbeConfig hostInfo *checks.HostInfo @@ -97,7 +97,7 @@ func (l *CheckRunner) RunRealTime() bool { // NewRunner creates a new CheckRunner func NewRunner( - config ddconfig.Reader, + config pkgconfigmodel.Reader, sysCfg *sysconfigtypes.Config, hostInfo *checks.HostInfo, enabledChecks []checks.Check, @@ -119,7 +119,7 @@ func NewRunner( // NewRunnerWithChecks creates a new CheckRunner func NewRunnerWithChecks( - config ddconfig.Reader, + config pkgconfigmodel.Reader, sysProbeCfg *checks.SysProbeConfig, hostInfo *checks.HostInfo, checks []checks.Check, diff --git a/pkg/process/runner/submitter.go b/pkg/process/runner/submitter.go index a2ee141a960a7..f4fd31c1cd92c 100644 --- a/pkg/process/runner/submitter.go +++ b/pkg/process/runner/submitter.go @@ -28,7 +28,7 @@ import ( "github.com/DataDog/datadog-agent/comp/process/forwarders" "github.com/DataDog/datadog-agent/comp/process/types" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/checks" "github.com/DataDog/datadog-agent/pkg/process/runner/endpoint" "github.com/DataDog/datadog-agent/pkg/process/statsd" @@ -92,22 +92,22 @@ type CheckSubmitter struct { func NewSubmitter(config config.Component, log log.Component, forwarders forwarders.Component, hostname string) (*CheckSubmitter, error) { queueBytes := config.GetInt("process_config.process_queue_bytes") if queueBytes <= 0 { - log.Warnf("Invalid queue bytes size: %d. Using default value: %d", queueBytes, ddconfig.DefaultProcessQueueBytes) - queueBytes = ddconfig.DefaultProcessQueueBytes + log.Warnf("Invalid queue bytes size: %d. Using default value: %d", queueBytes, pkgconfigsetup.DefaultProcessQueueBytes) + queueBytes = pkgconfigsetup.DefaultProcessQueueBytes } queueSize := config.GetInt("process_config.queue_size") if queueSize <= 0 { - log.Warnf("Invalid check queue size: %d. Using default value: %d", queueSize, ddconfig.DefaultProcessQueueSize) - queueSize = ddconfig.DefaultProcessQueueSize + log.Warnf("Invalid check queue size: %d. Using default value: %d", queueSize, pkgconfigsetup.DefaultProcessQueueSize) + queueSize = pkgconfigsetup.DefaultProcessQueueSize } processResults := api.NewWeightedQueue(queueSize, int64(queueBytes)) log.Debugf("Creating process check queue with max_size=%d and max_weight=%d", processResults.MaxSize(), processResults.MaxWeight()) rtQueueSize := config.GetInt("process_config.rt_queue_size") if rtQueueSize <= 0 { - log.Warnf("Invalid rt check queue size: %d. Using default value: %d", rtQueueSize, ddconfig.DefaultProcessRTQueueSize) - rtQueueSize = ddconfig.DefaultProcessRTQueueSize + log.Warnf("Invalid rt check queue size: %d. 
Using default value: %d", rtQueueSize, pkgconfigsetup.DefaultProcessRTQueueSize) + rtQueueSize = pkgconfigsetup.DefaultProcessRTQueueSize } // reuse main queue's ProcessQueueBytes because it's unlikely that it'll reach to that size in bytes, so we don't need a separate config for it rtProcessResults := api.NewWeightedQueue(rtQueueSize, int64(queueBytes)) diff --git a/pkg/process/runner/submitter_test.go b/pkg/process/runner/submitter_test.go index 81da4347a1473..2825af8105c89 100644 --- a/pkg/process/runner/submitter_test.go +++ b/pkg/process/runner/submitter_test.go @@ -16,19 +16,21 @@ import ( "go.uber.org/fx" model "github.com/DataDog/agent-payload/v5/process" + mockStatsd "github.com/DataDog/datadog-go/v5/statsd/mocks" + "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" "github.com/DataDog/datadog-agent/comp/process/forwarders" "github.com/DataDog/datadog-agent/comp/process/forwarders/forwardersimpl" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" processStatsd "github.com/DataDog/datadog-agent/pkg/process/statsd" "github.com/DataDog/datadog-agent/pkg/process/util/api/headers" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/version" - mockStatsd "github.com/DataDog/datadog-go/v5/statsd/mocks" ) func TestNewCollectorQueueSize(t *testing.T) { @@ -42,7 +44,7 @@ func TestNewCollectorQueueSize(t *testing.T) { name: "default queue size", override: false, queueSize: 42, - expectedQueueSize: ddconfig.DefaultProcessQueueSize, + expectedQueueSize: pkgconfigsetup.DefaultProcessQueueSize, }, { name: "valid queue size override", @@ -54,13 +56,13 @@ func TestNewCollectorQueueSize(t *testing.T) { name: "invalid negative queue size override", override: true, queueSize: -10, - expectedQueueSize: ddconfig.DefaultProcessQueueSize, + expectedQueueSize: pkgconfigsetup.DefaultProcessQueueSize, }, { name: "invalid 0 queue size override", override: true, queueSize: 0, - expectedQueueSize: ddconfig.DefaultProcessQueueSize, + expectedQueueSize: pkgconfigsetup.DefaultProcessQueueSize, }, } @@ -89,7 +91,7 @@ func TestNewCollectorRTQueueSize(t *testing.T) { name: "default queue size", override: false, queueSize: 2, - expectedQueueSize: ddconfig.DefaultProcessRTQueueSize, + expectedQueueSize: pkgconfigsetup.DefaultProcessRTQueueSize, }, { name: "valid queue size override", @@ -101,13 +103,13 @@ func TestNewCollectorRTQueueSize(t *testing.T) { name: "invalid negative size override", override: true, queueSize: -2, - expectedQueueSize: ddconfig.DefaultProcessRTQueueSize, + expectedQueueSize: pkgconfigsetup.DefaultProcessRTQueueSize, }, { name: "invalid 0 queue size override", override: true, queueSize: 0, - expectedQueueSize: ddconfig.DefaultProcessRTQueueSize, + expectedQueueSize: pkgconfigsetup.DefaultProcessRTQueueSize, }, } @@ -136,7 +138,7 @@ func TestNewCollectorProcessQueueBytes(t *testing.T) { name: "default queue size", override: false, queueBytes: 42000, - expectedQueueSize: ddconfig.DefaultProcessQueueBytes, + expectedQueueSize: pkgconfigsetup.DefaultProcessQueueBytes, }, { name: "valid queue size override", @@ -148,13 +150,13 @@ func TestNewCollectorProcessQueueBytes(t *testing.T) { name: 
"invalid negative queue size override", override: true, queueBytes: -2, - expectedQueueSize: ddconfig.DefaultProcessQueueBytes, + expectedQueueSize: pkgconfigsetup.DefaultProcessQueueBytes, }, { name: "invalid 0 queue size override", override: true, queueBytes: 0, - expectedQueueSize: ddconfig.DefaultProcessQueueBytes, + expectedQueueSize: pkgconfigsetup.DefaultProcessQueueBytes, }, } @@ -390,7 +392,7 @@ func newSubmitterDeps(t *testing.T) submitterDeps { return fxutil.Test[submitterDeps](t, getForwardersMockModules(t, nil)) } -func newSubmitterDepsWithConfig(t *testing.T, config ddconfig.Config) submitterDeps { +func newSubmitterDepsWithConfig(t *testing.T, config pkgconfigmodel.Config) submitterDeps { overrides := config.AllSettings() return fxutil.Test[submitterDeps](t, getForwardersMockModules(t, overrides)) } diff --git a/pkg/process/status/expvars.go b/pkg/process/status/expvars.go index f9642d22c7385..897c1014f5829 100644 --- a/pkg/process/status/expvars.go +++ b/pkg/process/status/expvars.go @@ -21,7 +21,7 @@ import ( model "github.com/DataDog/agent-payload/v5/process" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config" "github.com/DataDog/datadog-agent/pkg/util/filesystem" "github.com/DataDog/datadog-agent/pkg/version" @@ -247,7 +247,7 @@ func publishDropCheckPayloads() interface{} { } // InitExpvars initializes expvars -func InitExpvars(_ ddconfig.Reader, hostname string, processModuleEnabled, languageDetectionEnabled bool, eps []apicfg.Endpoint) { +func InitExpvars(_ pkgconfigmodel.Reader, hostname string, processModuleEnabled, languageDetectionEnabled bool, eps []apicfg.Endpoint) { infoOnce.Do(func() { processExpvars := expvar.NewMap("process_agent") hostString := expvar.NewString("host") diff --git a/pkg/process/util/address.go b/pkg/process/util/address.go index 15dde9f118d23..c98964820a071 100644 --- a/pkg/process/util/address.go +++ b/pkg/process/util/address.go @@ -20,41 +20,6 @@ type Address struct { netip.Addr } -// WriteTo writes the address byte representation into the supplied buffer -func (a Address) WriteTo(b []byte) int { - if a.Is4() { - v := a.As4() - return copy(b, v[:]) - } - - v := a.As16() - return copy(b, v[:]) - -} - -// Bytes returns a byte slice representing the Address. -// You may want to consider using `WriteTo` instead to avoid allocations -func (a Address) Bytes() []byte { - // Note: this implicitly converts IPv4-in-6 to IPv4 - if a.Is4() || a.Is4In6() { - v := a.As4() - return v[:] - } - - v := a.As16() - return v[:] -} - -// Len returns the number of bytes required to represent this IP -func (a Address) Len() int { - return int(a.BitLen()) / 8 -} - -// IsZero reports whether a is its zero value -func (a Address) IsZero() bool { - return a.Addr == netip.Addr{} -} - // AddressFromNetIP returns an Address from a provided net.IP func AddressFromNetIP(ip net.IP) Address { addr, _ := netipx.FromStdIP(ip) @@ -71,7 +36,7 @@ func AddressFromString(s string) Address { // Warning: the returned `net.IP` will share the same underlying // memory as the given `buf` argument. 
func NetIPFromAddress(addr Address, buf []byte) net.IP { - n := addr.WriteTo(buf) + n := copy(buf, addr.AsSlice()) return net.IP(buf[:n]) } @@ -115,11 +80,6 @@ func V4Address(ip uint32) Address { } } -// V4AddressFromBytes creates an Address using the byte representation of an v4 IP -func V4AddressFromBytes(buf []byte) Address { - return Address{netip.AddrFrom4(*(*[4]byte)(buf))} -} - // V6Address creates an Address using the uint128 representation of an v6 IP func V6Address(low, high uint64) Address { var a [16]byte @@ -128,10 +88,5 @@ func V6Address(low, high uint64) Address { return Address{netip.AddrFrom16(a)} } -// V6AddressFromBytes creates an Address using the byte representation of an v6 IP -func V6AddressFromBytes(buf []byte) Address { - return Address{netip.AddrFrom16(*(*[16]byte)(buf))} -} - // IPBufferPool is meant to be used in conjunction with `NetIPFromAddress` var IPBufferPool = ddsync.NewSlicePool[byte](net.IPv6len, net.IPv6len) diff --git a/pkg/process/util/address_test.go b/pkg/process/util/address_test.go index bfbf57c312efc..a1de755961b49 100644 --- a/pkg/process/util/address_test.go +++ b/pkg/process/util/address_test.go @@ -88,22 +88,16 @@ func TestAddressUsageInMaps(t *testing.T) { func TestAddressV4(t *testing.T) { addr := V4Address(889192575) - // Should be able to recreate addr from bytes alone - assert.Equal(t, addr, V4AddressFromBytes(addr.Bytes())) // Should be able to recreate addr from IP string assert.Equal(t, addr, AddressFromString("127.0.0.53")) assert.Equal(t, "127.0.0.53", addr.String()) addr = V4Address(0) - // Should be able to recreate addr from bytes alone - assert.Equal(t, addr, V4AddressFromBytes(addr.Bytes())) // Should be able to recreate addr from IP string assert.Equal(t, addr, AddressFromString("0.0.0.0")) assert.Equal(t, "0.0.0.0", addr.String()) addr = V4Address(16820416) - // Should be able to recreate addr from bytes alone - assert.Equal(t, addr, V4AddressFromBytes(addr.Bytes())) // Should be able to recreate addr from IP string assert.Equal(t, addr, AddressFromString("192.168.0.1")) assert.Equal(t, "192.168.0.1", addr.String()) @@ -111,31 +105,23 @@ func TestAddressV4(t *testing.T) { func TestAddressV6(t *testing.T) { addr := V6Address(889192575, 0) - // Should be able to recreate addr from bytes alone - assert.Equal(t, addr, V6AddressFromBytes(addr.Bytes())) // Should be able to recreate addr from IP string assert.Equal(t, addr, AddressFromString("::7f00:35:0:0")) assert.Equal(t, "::7f00:35:0:0", addr.String()) assert.False(t, addr.IsLoopback()) addr = V6Address(0, 0) - // Should be able to recreate addr from bytes alone - assert.Equal(t, addr, V6AddressFromBytes(addr.Bytes())) // Should be able to recreate addr from IP string assert.Equal(t, addr, AddressFromString("::")) assert.Equal(t, "::", addr.String()) addr = V6Address(72057594037927936, 0) - // Should be able to recreate addr from bytes alone - assert.Equal(t, addr, V6AddressFromBytes(addr.Bytes())) // Should be able to recreate addr from IP string assert.Equal(t, addr, AddressFromString("::1")) assert.Equal(t, "::1", addr.String()) assert.True(t, addr.IsLoopback()) addr = V6Address(72059793061183488, 3087860000) - // Should be able to recreate addr from bytes alone - assert.Equal(t, addr, V6AddressFromBytes(addr.Bytes())) // Should be able to recreate addr from IP string assert.Equal(t, addr, AddressFromString("2001:db8::2:1")) assert.Equal(t, "2001:db8::2:1", addr.String()) @@ -176,35 +162,6 @@ func BenchmarkV6Address(b *testing.B) { runtime.KeepAlive(addr) } -func 
BenchmarkBytes(b *testing.B) { - var ( - addr = AddressFromString("8.8.8.8") - bytes []byte - ) - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - // this allocates a slice descriptor that escapes to the heap - bytes = addr.Bytes() - } - runtime.KeepAlive(bytes) -} - -func BenchmarkWriteTo(b *testing.B) { - addr := AddressFromString("8.8.8.8") - bytes := make([]byte, 4) - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - // this method shouldn't allocate - _ = addr.WriteTo(bytes) - bytes = bytes[:0] - } - runtime.KeepAlive(bytes) -} - func BenchmarkToLowHigh(b *testing.B) { addr := AddressFromString("8.8.8.8") var l, h uint64 diff --git a/pkg/process/util/status/status.go b/pkg/process/util/status/status.go index 2d05dbd664f2b..aea42e11f5fb6 100644 --- a/pkg/process/util/status/status.go +++ b/pkg/process/util/status/status.go @@ -16,7 +16,7 @@ import ( hostMetadataUtils "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/utils" apiutil "github.com/DataDog/datadog-agent/pkg/api/util" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -128,7 +128,7 @@ func OverrideTime(t time.Time) StatusOption { } } -func getCoreStatus(coreConfig ddconfig.Reader) (s CoreStatus) { +func getCoreStatus(coreConfig pkgconfigmodel.Reader) (s CoreStatus) { return CoreStatus{ AgentVersion: version.AgentVersion, GoVersion: runtime.Version(), @@ -152,7 +152,7 @@ func getExpvars(expVarURL string) (s ProcessExpvars, err error) { } // GetStatus returns a Status object with runtime information about process-agent -func GetStatus(coreConfig ddconfig.Reader, expVarURL string) (*Status, error) { +func GetStatus(coreConfig pkgconfigmodel.Reader, expVarURL string) (*Status, error) { coreStatus := getCoreStatus(coreConfig) processExpVars, err := getExpvars(expVarURL) if err != nil { diff --git a/pkg/process/util/status/status_test.go b/pkg/process/util/status/status_test.go index 5efd72f4d9585..89f37e5ee9083 100644 --- a/pkg/process/util/status/status_test.go +++ b/pkg/process/util/status/status_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/require" hostMetadataUtils "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/utils" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -77,7 +77,7 @@ func TestGetStatus(t *testing.T) { // Feature detection needs to run before host methods are called. 
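// Editor's aside (illustrative sketch, not part of the patch): the Address helpers removed above
// in pkg/process/util (Bytes, WriteTo, the *FromBytes constructors and their benchmarks) are
// covered by the embedded netip.Addr (AsSlice, As4, As16) and by NetIPFromAddress with a
// caller-provided buffer, as sketched below. The fragment's package name is hypothetical.

package addrexample // hypothetical package name for this fragment

import (
	"fmt"
	"net"

	"github.com/DataDog/datadog-agent/pkg/process/util"
)

func exampleAddressBytes() {
	addr := util.AddressFromString("127.0.0.53")

	// AsSlice is promoted from the embedded netip.Addr and returns a fresh 4- or 16-byte slice.
	raw := addr.AsSlice()
	fmt.Println(len(raw)) // 4 for an IPv4 address

	// NetIPFromAddress copies into a reusable buffer; the returned net.IP aliases that buffer.
	buf := make([]byte, net.IPv6len)
	ip := util.NetIPFromAddress(addr, buf)
	fmt.Println(ip) // 127.0.0.53
}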
During runtime, feature detection happens // when the datadog.yaml file is loaded cfg := configmock.New(t) - ddconfig.SetFeatures(t) + env.SetFeatures(t) cfg.SetWithoutSource("hostname", "test") // Prevents panic since feature detection has not run cfg.SetWithoutSource("language_detection.enabled", true) diff --git a/pkg/remoteconfig/state/products.go b/pkg/remoteconfig/state/products.go index 35974c5e94386..120229ecf2d39 100644 --- a/pkg/remoteconfig/state/products.go +++ b/pkg/remoteconfig/state/products.go @@ -30,6 +30,7 @@ var validProducts = map[string]struct{}{ ProductContainerAutoscalingValues: {}, ProductTesting1: {}, ProductTesting2: {}, + ProductOrchestratorK8sCRDs: {}, } const ( @@ -81,4 +82,6 @@ const ( ProductTesting1 = "TESTING1" // ProductTesting2 is a product used for testing remote config ProductTesting2 = "TESTING2" + // ProductOrchestratorK8sCRDs receives values for k8s crds + ProductOrchestratorK8sCRDs = "ORCHESTRATOR_K8S_CRDS" ) diff --git a/pkg/sbom/collectors/host/collector.go b/pkg/sbom/collectors/host/collector.go index b5d1a5a992a97..b92d8ad7360f2 100644 --- a/pkg/sbom/collectors/host/collector.go +++ b/pkg/sbom/collectors/host/collector.go @@ -35,6 +35,11 @@ func (c *Collector) Shutdown() { c.closed = true } +// channelSize defines the result channel size +// It doesn't need more than 1 because the host collector should +// not trigger multiple scans at the same time unlike for container-images. +const channelSize = 1 + func init() { collectors.RegisterCollector(collectors.HostCollector, &Collector{ resChan: make(chan sbom.ScanResult, channelSize), diff --git a/pkg/sbom/collectors/host/request.go b/pkg/sbom/collectors/host/request.go index 8ecd73f19a20e..c30cb453ceab9 100644 --- a/pkg/sbom/collectors/host/request.go +++ b/pkg/sbom/collectors/host/request.go @@ -3,22 +3,17 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build trivy || (windows && wmi) - package host import ( "io/fs" + "os" + "path/filepath" - "github.com/DataDog/datadog-agent/pkg/sbom" - "github.com/DataDog/datadog-agent/pkg/sbom/collectors" + "github.com/DataDog/datadog-agent/pkg/config/env" + "github.com/DataDog/datadog-agent/pkg/sbom/types" ) -// channelSize defines the result channel size -// It doesn't need more than 1 because the host collector should -// not trigger multiple scans at the same time unlike for container-images. -const channelSize = 1 - // scanRequest defines a scan request. This struct should be // hashable to be pushed in the work queue for processing. 
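// Editor's aside (illustrative sketch, not part of the patch): scan requests now only need to
// satisfy the small interface moved to pkg/sbom/types (Collector, Type, ID), so a collector can
// define any comparable request type, like the host scanRequest that follows. The type, collector
// name, and package name below are hypothetical.

package sbomexample // hypothetical package name for this fragment

import "github.com/DataDog/datadog-agent/pkg/sbom/types"

// exampleRequest is a hypothetical, hashable scan request keyed by an image ID.
type exampleRequest struct {
	imageID string
}

func (r exampleRequest) Collector() string             { return "example" }
func (r exampleRequest) Type(types.ScanOptions) string { return types.ScanFilesystemType }
func (r exampleRequest) ID() string                    { return r.imageID }

// Compile-time check that exampleRequest satisfies the interface.
var _ types.ScanRequest = exampleRequest{}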
type scanRequest struct { @@ -26,19 +21,50 @@ type scanRequest struct { FS fs.FS } +type relFS struct { + root string + fs fs.FS +} + +func newFS(root string) fs.FS { + fs := os.DirFS(root) + return &relFS{root: "/", fs: fs} +} + +func (f *relFS) Open(name string) (fs.File, error) { + if filepath.IsAbs(name) { + var err error + name, err = filepath.Rel(f.root, name) + if err != nil { + return nil, err + } + } + + return f.fs.Open(name) +} + // NewScanRequest creates a new scan request -func NewScanRequest(path string, fs fs.FS) sbom.ScanRequest { +func NewScanRequest(path string, fs fs.FS) types.ScanRequest { return scanRequest{Path: path, FS: fs} } +// NewHostScanRequest creates a new scan request for the root filesystem +func NewHostScanRequest() types.ScanRequest { + scanPath := "/" + if hostRoot := os.Getenv("HOST_ROOT"); env.IsContainerized() && hostRoot != "" { + scanPath = hostRoot + } + return NewScanRequest(scanPath, newFS("/")) +} + // Collector returns the collector name func (r scanRequest) Collector() string { - return collectors.HostCollector + return "host" } // Type returns the scan request type -func (r scanRequest) Type(sbom.ScanOptions) string { - return sbom.ScanFilesystemType +func (r scanRequest) Type(types.ScanOptions) string { + return types.ScanFilesystemType } // ID returns the scan request ID diff --git a/pkg/sbom/sbom.go b/pkg/sbom/sbom.go index 551547d870a3a..accf0e8eb702b 100644 --- a/pkg/sbom/sbom.go +++ b/pkg/sbom/sbom.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + "github.com/DataDog/datadog-agent/pkg/sbom/types" cyclonedxgo "github.com/CycloneDX/cyclonedx-go" ) @@ -26,19 +27,6 @@ type Report interface { ID() string } -// ScanOptions defines the scan options -type ScanOptions struct { - Analyzers []string - CheckDiskUsage bool - MinAvailableDisk uint64 - Timeout time.Duration - WaitAfter time.Duration - Fast bool - CollectFiles bool - UseMount bool - OverlayFsScan bool -} - // ScanOptionsFromConfig loads the scanning options from the configuration func ScanOptionsFromConfig(cfg config.Component, containers bool) (scanOpts ScanOptions) { if containers { @@ -59,11 +47,10 @@ func ScanOptionsFromConfig(cfg config.Component, containers bool) (scanOpts Scan } // ScanRequest defines the scan request interface -type ScanRequest interface { - Collector() string - Type(ScanOptions) string - ID() string -} +type ScanRequest = types.ScanRequest + +// ScanOptions defines the scan options +type ScanOptions = types.ScanOptions // ScanResult defines the scan result type ScanResult struct { diff --git a/pkg/sbom/scanner/scanner.go b/pkg/sbom/scanner/scanner.go index bf4e25676d723..19d25a01c808a 100644 --- a/pkg/sbom/scanner/scanner.go +++ b/pkg/sbom/scanner/scanner.go @@ -224,6 +224,11 @@ func (s *Scanner) startScanRequestHandler(ctx context.Context) { }() } +// GetCollector returns the collector with the specified name +func (s *Scanner) GetCollector(collector string) collectors.Collector { + return s.collectors[collector] +} + func (s *Scanner) handleScanRequest(ctx context.Context, r interface{}) { request, ok := r.(sbom.ScanRequest) if !ok { @@ -232,8 +237,8 @@ func (s *Scanner) handleScanRequest(ctx context.Context, r interface{}) { return } - collector, ok := s.collectors[request.Collector()] - if !ok { + collector := s.GetCollector(request.Collector()) + if collector == nil { _ = log.Errorf("invalid collector '%s'", request.Collector()) 
s.scanQueue.Forget(request) return @@ -276,7 +281,7 @@ func (s *Scanner) processScan(ctx context.Context, request sbom.ScanRequest, img if result == nil { scanContext, cancel := context.WithTimeout(ctx, timeout(collector)) defer cancel() - result = s.performScan(scanContext, request, collector) + result = s.PerformScan(scanContext, request, collector) errorType = "scan" } sendResult(ctx, request.ID(), result, collector) @@ -299,7 +304,8 @@ func (s *Scanner) checkDiskSpace(imgMeta *workloadmeta.ContainerImageMetadata, c return result } -func (s *Scanner) performScan(ctx context.Context, request sbom.ScanRequest, collector collectors.Collector) *sbom.ScanResult { +// PerformScan processes a scan request with the selected collector and returns the SBOM +func (s *Scanner) PerformScan(ctx context.Context, request sbom.ScanRequest, collector collectors.Collector) *sbom.ScanResult { createdAt := time.Now() s.cacheMutex.Lock() diff --git a/pkg/sbom/scanner/scanner_test.go b/pkg/sbom/scanner/scanner_test.go index ba522b63cbefc..e10743f19828d 100644 --- a/pkg/sbom/scanner/scanner_test.go +++ b/pkg/sbom/scanner/scanner_test.go @@ -21,7 +21,6 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/sbom" "github.com/DataDog/datadog-agent/pkg/sbom/collectors" @@ -121,7 +120,7 @@ func TestRetryLogic_Error(t *testing.T) { mockCollector.On("Type").Return(tt.st) // Set up the configuration as the default one is too slow - cfg := config.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + cfg := model.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) cfg.Set("sbom.scan_queue.base_backoff", "200ms", model.SourceAgentRuntime) cfg.Set("sbom.scan_queue.max_backoff", "600ms", model.SourceAgentRuntime) cfg.Set("sbom.cache.clean_interval", "10s", model.SourceAgentRuntime) // Required for the ticker @@ -186,7 +185,7 @@ func TestRetryLogic_ImageDeleted(t *testing.T) { mockCollector.On("Type").Return(collectors.ContainerImageScanType) // Set up the configuration as the default one is too slow - cfg := config.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + cfg := model.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) cfg.Set("sbom.scan_queue.base_backoff", "200ms", model.SourceAgentRuntime) cfg.Set("sbom.scan_queue.max_backoff", "600ms", model.SourceAgentRuntime) cfg.Set("sbom.cache.clean_interval", "10s", model.SourceAgentRuntime) // Required for the ticker @@ -251,7 +250,7 @@ func TestRetryChannelFull(t *testing.T) { mockCollector.On("Type").Return(collectors.ContainerImageScanType) // Set up the configuration - cfg := config.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + cfg := model.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) cfg.Set("sbom.scan_queue.base_backoff", "200ms", model.SourceAgentRuntime) cfg.Set("sbom.scan_queue.max_backoff", "600ms", model.SourceAgentRuntime) cfg.Set("sbom.cache.clean_interval", "10s", model.SourceAgentRuntime) // Required for the ticker diff --git a/pkg/sbom/types/types.go b/pkg/sbom/types/types.go new file mode 100644 index 0000000000000..8b5989cd5ed23 --- /dev/null +++ b/pkg/sbom/types/types.go @@ -0,0 +1,34 @@ +// Unless explicitly stated otherwise all files in this repository are licensed 
+// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package types holds sbom related types +package types + +import "time" + +// ScanRequest defines the scan request interface +type ScanRequest interface { + Collector() string + Type(ScanOptions) string + ID() string +} + +// ScanOptions defines the scan options +type ScanOptions struct { + Analyzers []string + CheckDiskUsage bool + MinAvailableDisk uint64 + Timeout time.Duration + WaitAfter time.Duration + Fast bool + CollectFiles bool + UseMount bool + OverlayFsScan bool +} + +const ( + ScanFilesystemType = "filesystem" // ScanFilesystemType defines the type for file-system scan + ScanDaemonType = "daemon" // ScanDaemonType defines the type for daemon scan +) diff --git a/pkg/security/agent/client.go b/pkg/security/agent/client.go index 46325e9b60722..317cd472695c4 100644 --- a/pkg/security/agent/client.go +++ b/pkg/security/agent/client.go @@ -18,7 +18,7 @@ import ( "google.golang.org/grpc/backoff" "google.golang.org/grpc/credentials/insecure" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/proto/api" ) @@ -182,7 +182,7 @@ func (c *RuntimeSecurityClient) Close() { // NewRuntimeSecurityClient instantiates a new RuntimeSecurityClient func NewRuntimeSecurityClient() (*RuntimeSecurityClient, error) { - socketPath := coreconfig.Datadog().GetString("runtime_security_config.socket") + socketPath := pkgconfigsetup.Datadog().GetString("runtime_security_config.socket") if socketPath == "" { return nil, errors.New("runtime_security_config.socket must be set") } diff --git a/pkg/security/common/logs_context.go b/pkg/security/common/logs_context.go index a274f6bbba851..7ddddf0701a6d 100644 --- a/pkg/security/common/logs_context.go +++ b/pkg/security/common/logs_context.go @@ -10,7 +10,7 @@ import ( "fmt" logsconfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/client" logshttp "github.com/DataDog/datadog-agent/pkg/logs/client/http" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -22,7 +22,7 @@ const ( // NewLogContextCompliance returns the context fields to send compliance events to the intake func NewLogContextCompliance() (*logsconfig.Endpoints, *client.DestinationsContext, error) { - logsConfigComplianceKeys := logsconfig.NewLogsConfigKeys("compliance_config.endpoints.", pkgconfig.Datadog()) + logsConfigComplianceKeys := logsconfig.NewLogsConfigKeys("compliance_config.endpoints.", pkgconfigsetup.Datadog()) return NewLogContext(logsConfigComplianceKeys, "cspm-intake.", "compliance", logsconfig.DefaultIntakeOrigin, logsconfig.AgentJSONIntakeProtocol) } @@ -39,18 +39,18 @@ func NewLogContextRuntime(useSecRuntimeTrack bool) (*logsconfig.Endpoints, *clie trackType = "logs" } - logsRuntimeConfigKeys := logsconfig.NewLogsConfigKeys("runtime_security_config.endpoints.", pkgconfig.Datadog()) + logsRuntimeConfigKeys := logsconfig.NewLogsConfigKeys("runtime_security_config.endpoints.", pkgconfigsetup.Datadog()) return NewLogContext(logsRuntimeConfigKeys, "runtime-security-http-intake.logs.", trackType, cwsIntakeOrigin, logsconfig.DefaultIntakeProtocol) } // NewLogContext 
returns the context fields to send events to the intake func NewLogContext(logsConfig *logsconfig.LogsConfigKeys, endpointPrefix string, intakeTrackType logsconfig.IntakeTrackType, intakeOrigin logsconfig.IntakeOrigin, intakeProtocol logsconfig.IntakeProtocol) (*logsconfig.Endpoints, *client.DestinationsContext, error) { - endpoints, err := logsconfig.BuildHTTPEndpointsWithConfig(pkgconfig.Datadog(), logsConfig, endpointPrefix, intakeTrackType, intakeProtocol, intakeOrigin) + endpoints, err := logsconfig.BuildHTTPEndpointsWithConfig(pkgconfigsetup.Datadog(), logsConfig, endpointPrefix, intakeTrackType, intakeProtocol, intakeOrigin) if err != nil { - endpoints, err = logsconfig.BuildHTTPEndpoints(pkgconfig.Datadog(), intakeTrackType, intakeProtocol, intakeOrigin) + endpoints, err = logsconfig.BuildHTTPEndpoints(pkgconfigsetup.Datadog(), intakeTrackType, intakeProtocol, intakeOrigin) if err == nil { - httpConnectivity := logshttp.CheckConnectivity(endpoints.Main, pkgconfig.Datadog()) - endpoints, err = logsconfig.BuildEndpoints(pkgconfig.Datadog(), httpConnectivity, intakeTrackType, intakeProtocol, intakeOrigin) + httpConnectivity := logshttp.CheckConnectivity(endpoints.Main, pkgconfigsetup.Datadog()) + endpoints, err = logsconfig.BuildEndpoints(pkgconfigsetup.Datadog(), httpConnectivity, intakeTrackType, intakeProtocol, intakeOrigin) } } diff --git a/pkg/security/config/config.go b/pkg/security/config/config.go index f854a755126ec..8ca5c7ef33ae8 100644 --- a/pkg/security/config/config.go +++ b/pkg/security/config/config.go @@ -15,7 +15,8 @@ import ( sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" logsconfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" logshttp "github.com/DataDog/datadog-agent/pkg/logs/client/http" pconfig "github.com/DataDog/datadog-agent/pkg/security/probe/config" @@ -156,8 +157,6 @@ type RuntimeSecurityConfig struct { SecurityProfileCacheSize int // SecurityProfileMaxCount defines the maximum number of Security Profiles that may be evaluated concurrently SecurityProfileMaxCount int - // SecurityProfileRCEnabled defines if remote-configuration is enabled - SecurityProfileRCEnabled bool // SecurityProfileDNSMatchMaxDepth defines the max depth of subdomain to be matched for DNS anomaly detection (0 to match everything) SecurityProfileDNSMatchMaxDepth int @@ -301,7 +300,7 @@ func NewConfig() (*Config, error) { // NewRuntimeSecurityConfig returns the runtime security (CWS) config, build from the system probe one func NewRuntimeSecurityConfig() (*RuntimeSecurityConfig, error) { - sysconfig.Adjust(coreconfig.SystemProbe()) + sysconfig.Adjust(pkgconfigsetup.SystemProbe()) eventTypeStrings := map[string]model.EventType{} @@ -323,65 +322,65 @@ func NewRuntimeSecurityConfig() (*RuntimeSecurityConfig, error) { } rsConfig := &RuntimeSecurityConfig{ - RuntimeEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enabled"), - FIMEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.fim_enabled"), - WindowsFilenameCacheSize: coreconfig.SystemProbe().GetInt("runtime_security_config.windows_filename_cache_max"), - WindowsRegistryCacheSize: coreconfig.SystemProbe().GetInt("runtime_security_config.windows_registry_cache_max"), - ETWEventsChannelSize: 
coreconfig.SystemProbe().GetInt("runtime_security_config.etw_events_channel_size"), - ETWEventsMaxBuffers: coreconfig.SystemProbe().GetInt("runtime_security_config.etw_events_max_buffers"), - WindowsProbeBlockOnChannelSend: coreconfig.SystemProbe().GetBool("runtime_security_config.windows_probe_block_on_channel_send"), - - SocketPath: coreconfig.SystemProbe().GetString("runtime_security_config.socket"), - EventServerBurst: coreconfig.SystemProbe().GetInt("runtime_security_config.event_server.burst"), - EventServerRate: coreconfig.SystemProbe().GetInt("runtime_security_config.event_server.rate"), - EventServerRetention: coreconfig.SystemProbe().GetDuration("runtime_security_config.event_server.retention"), - - SelfTestEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.self_test.enabled"), - SelfTestSendReport: coreconfig.SystemProbe().GetBool("runtime_security_config.self_test.send_report"), + RuntimeEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.enabled"), + FIMEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.fim_enabled"), + WindowsFilenameCacheSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.windows_filename_cache_max"), + WindowsRegistryCacheSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.windows_registry_cache_max"), + ETWEventsChannelSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.etw_events_channel_size"), + ETWEventsMaxBuffers: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.etw_events_max_buffers"), + WindowsProbeBlockOnChannelSend: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.windows_probe_block_on_channel_send"), + + SocketPath: pkgconfigsetup.SystemProbe().GetString("runtime_security_config.socket"), + EventServerBurst: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.event_server.burst"), + EventServerRate: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.event_server.rate"), + EventServerRetention: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.event_server.retention"), + + SelfTestEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.self_test.enabled"), + SelfTestSendReport: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.self_test.send_report"), RemoteConfigurationEnabled: isRemoteConfigEnabled(), - RemoteConfigurationDumpPolicies: coreconfig.SystemProbe().GetBool("runtime_security_config.remote_configuration.dump_policies"), + RemoteConfigurationDumpPolicies: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.remote_configuration.dump_policies"), - OnDemandEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.on_demand.enabled"), - OnDemandRateLimiterEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.on_demand.rate_limiter.enabled"), + OnDemandEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.on_demand.enabled"), + OnDemandRateLimiterEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.on_demand.rate_limiter.enabled"), // policy & ruleset - PoliciesDir: coreconfig.SystemProbe().GetString("runtime_security_config.policies.dir"), - WatchPoliciesDir: coreconfig.SystemProbe().GetBool("runtime_security_config.policies.watch_dir"), - PolicyMonitorEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.policies.monitor.enabled"), - PolicyMonitorPerRuleEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.policies.monitor.per_rule_enabled"), - 
PolicyMonitorReportInternalPolicies: coreconfig.SystemProbe().GetBool("runtime_security_config.policies.monitor.report_internal_policies"), + PoliciesDir: pkgconfigsetup.SystemProbe().GetString("runtime_security_config.policies.dir"), + WatchPoliciesDir: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.policies.watch_dir"), + PolicyMonitorEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.policies.monitor.enabled"), + PolicyMonitorPerRuleEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.policies.monitor.per_rule_enabled"), + PolicyMonitorReportInternalPolicies: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.policies.monitor.report_internal_policies"), - LogPatterns: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.log_patterns"), - LogTags: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.log_tags"), + LogPatterns: pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.log_patterns"), + LogTags: pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.log_tags"), // custom events - InternalMonitoringEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.internal_monitoring.enabled"), + InternalMonitoringEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.internal_monitoring.enabled"), // activity dump - ActivityDumpEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.activity_dump.enabled"), - ActivityDumpCleanupPeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.cleanup_period"), - ActivityDumpTagsResolutionPeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.tags_resolution_period"), - ActivityDumpLoadControlPeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.load_controller_period"), - ActivityDumpLoadControlMinDumpTimeout: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.min_timeout"), - ActivityDumpTracedCgroupsCount: coreconfig.SystemProbe().GetInt("runtime_security_config.activity_dump.traced_cgroups_count"), - ActivityDumpTracedEventTypes: parseEventTypeStringSlice(coreconfig.SystemProbe().GetStringSlice("runtime_security_config.activity_dump.traced_event_types")), - ActivityDumpCgroupDumpTimeout: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.dump_duration"), - ActivityDumpRateLimiter: coreconfig.SystemProbe().GetInt("runtime_security_config.activity_dump.rate_limiter"), - ActivityDumpCgroupWaitListTimeout: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.cgroup_wait_list_timeout"), - ActivityDumpCgroupDifferentiateArgs: coreconfig.SystemProbe().GetBool("runtime_security_config.activity_dump.cgroup_differentiate_args"), - ActivityDumpLocalStorageDirectory: coreconfig.SystemProbe().GetString("runtime_security_config.activity_dump.local_storage.output_directory"), - ActivityDumpLocalStorageMaxDumpsCount: coreconfig.SystemProbe().GetInt("runtime_security_config.activity_dump.local_storage.max_dumps_count"), - ActivityDumpLocalStorageCompression: coreconfig.SystemProbe().GetBool("runtime_security_config.activity_dump.local_storage.compression"), - ActivityDumpSyscallMonitorPeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.syscall_monitor.period"), - ActivityDumpMaxDumpCountPerWorkload: coreconfig.SystemProbe().GetInt("runtime_security_config.activity_dump.max_dump_count_per_workload"), - 
ActivityDumpTagRulesEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.activity_dump.tag_rules.enabled"), - ActivityDumpSilentWorkloadsDelay: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.silent_workloads.delay"), - ActivityDumpSilentWorkloadsTicker: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.silent_workloads.ticker"), - ActivityDumpWorkloadDenyList: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.activity_dump.workload_deny_list"), - ActivityDumpAutoSuppressionEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.activity_dump.auto_suppression.enabled"), + ActivityDumpEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.activity_dump.enabled"), + ActivityDumpCleanupPeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.cleanup_period"), + ActivityDumpTagsResolutionPeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.tags_resolution_period"), + ActivityDumpLoadControlPeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.load_controller_period"), + ActivityDumpLoadControlMinDumpTimeout: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.min_timeout"), + ActivityDumpTracedCgroupsCount: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.activity_dump.traced_cgroups_count"), + ActivityDumpTracedEventTypes: parseEventTypeStringSlice(pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.activity_dump.traced_event_types")), + ActivityDumpCgroupDumpTimeout: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.dump_duration"), + ActivityDumpRateLimiter: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.activity_dump.rate_limiter"), + ActivityDumpCgroupWaitListTimeout: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.cgroup_wait_list_timeout"), + ActivityDumpCgroupDifferentiateArgs: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.activity_dump.cgroup_differentiate_args"), + ActivityDumpLocalStorageDirectory: pkgconfigsetup.SystemProbe().GetString("runtime_security_config.activity_dump.local_storage.output_directory"), + ActivityDumpLocalStorageMaxDumpsCount: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.activity_dump.local_storage.max_dumps_count"), + ActivityDumpLocalStorageCompression: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.activity_dump.local_storage.compression"), + ActivityDumpSyscallMonitorPeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.syscall_monitor.period"), + ActivityDumpMaxDumpCountPerWorkload: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.activity_dump.max_dump_count_per_workload"), + ActivityDumpTagRulesEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.activity_dump.tag_rules.enabled"), + ActivityDumpSilentWorkloadsDelay: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.silent_workloads.delay"), + ActivityDumpSilentWorkloadsTicker: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.silent_workloads.ticker"), + ActivityDumpWorkloadDenyList: pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.activity_dump.workload_deny_list"), + ActivityDumpAutoSuppressionEnabled: 
pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.activity_dump.auto_suppression.enabled"), // activity dump dynamic fields ActivityDumpMaxDumpSize: func() int { - mds := coreconfig.SystemProbe().GetInt("runtime_security_config.activity_dump.max_dump_size") + mds := pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.activity_dump.max_dump_size") if mds < ADMinMaxDumSize { mds = ADMinMaxDumSize } @@ -389,66 +388,65 @@ func NewRuntimeSecurityConfig() (*RuntimeSecurityConfig, error) { }, // SBOM resolver - SBOMResolverEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.sbom.enabled"), - SBOMResolverWorkloadsCacheSize: coreconfig.SystemProbe().GetInt("runtime_security_config.sbom.workloads_cache_size"), - SBOMResolverHostEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.sbom.host.enabled"), + SBOMResolverEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.sbom.enabled"), + SBOMResolverWorkloadsCacheSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.sbom.workloads_cache_size"), + SBOMResolverHostEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.sbom.host.enabled"), // Hash resolver - HashResolverEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.hash_resolver.enabled"), - HashResolverEventTypes: parseEventTypeStringSlice(coreconfig.SystemProbe().GetStringSlice("runtime_security_config.hash_resolver.event_types")), - HashResolverMaxFileSize: coreconfig.SystemProbe().GetInt64("runtime_security_config.hash_resolver.max_file_size"), - HashResolverHashAlgorithms: parseHashAlgorithmStringSlice(coreconfig.SystemProbe().GetStringSlice("runtime_security_config.hash_resolver.hash_algorithms")), - HashResolverMaxHashBurst: coreconfig.SystemProbe().GetInt("runtime_security_config.hash_resolver.max_hash_burst"), - HashResolverMaxHashRate: coreconfig.SystemProbe().GetInt("runtime_security_config.hash_resolver.max_hash_rate"), - HashResolverCacheSize: coreconfig.SystemProbe().GetInt("runtime_security_config.hash_resolver.cache_size"), - HashResolverReplace: coreconfig.SystemProbe().GetStringMapString("runtime_security_config.hash_resolver.replace"), + HashResolverEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.hash_resolver.enabled"), + HashResolverEventTypes: parseEventTypeStringSlice(pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.hash_resolver.event_types")), + HashResolverMaxFileSize: pkgconfigsetup.SystemProbe().GetInt64("runtime_security_config.hash_resolver.max_file_size"), + HashResolverHashAlgorithms: parseHashAlgorithmStringSlice(pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.hash_resolver.hash_algorithms")), + HashResolverMaxHashBurst: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.hash_resolver.max_hash_burst"), + HashResolverMaxHashRate: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.hash_resolver.max_hash_rate"), + HashResolverCacheSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.hash_resolver.cache_size"), + HashResolverReplace: pkgconfigsetup.SystemProbe().GetStringMapString("runtime_security_config.hash_resolver.replace"), // security profiles - SecurityProfileEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.security_profile.enabled"), - SecurityProfileMaxImageTags: coreconfig.SystemProbe().GetInt("runtime_security_config.security_profile.max_image_tags"), - SecurityProfileDir: 
coreconfig.SystemProbe().GetString("runtime_security_config.security_profile.dir"), - SecurityProfileWatchDir: coreconfig.SystemProbe().GetBool("runtime_security_config.security_profile.watch_dir"), - SecurityProfileCacheSize: coreconfig.SystemProbe().GetInt("runtime_security_config.security_profile.cache_size"), - SecurityProfileMaxCount: coreconfig.SystemProbe().GetInt("runtime_security_config.security_profile.max_count"), - SecurityProfileRCEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.security_profile.remote_configuration.enabled"), - SecurityProfileDNSMatchMaxDepth: coreconfig.SystemProbe().GetInt("runtime_security_config.security_profile.dns_match_max_depth"), + SecurityProfileEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.security_profile.enabled"), + SecurityProfileMaxImageTags: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.security_profile.max_image_tags"), + SecurityProfileDir: pkgconfigsetup.SystemProbe().GetString("runtime_security_config.security_profile.dir"), + SecurityProfileWatchDir: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.security_profile.watch_dir"), + SecurityProfileCacheSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.security_profile.cache_size"), + SecurityProfileMaxCount: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.security_profile.max_count"), + SecurityProfileDNSMatchMaxDepth: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.security_profile.dns_match_max_depth"), // auto suppression - SecurityProfileAutoSuppressionEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.security_profile.auto_suppression.enabled"), - SecurityProfileAutoSuppressionEventTypes: parseEventTypeStringSlice(coreconfig.SystemProbe().GetStringSlice("runtime_security_config.security_profile.auto_suppression.event_types")), + SecurityProfileAutoSuppressionEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.security_profile.auto_suppression.enabled"), + SecurityProfileAutoSuppressionEventTypes: parseEventTypeStringSlice(pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.security_profile.auto_suppression.event_types")), // anomaly detection - AnomalyDetectionEventTypes: parseEventTypeStringSlice(coreconfig.SystemProbe().GetStringSlice("runtime_security_config.security_profile.anomaly_detection.event_types")), - AnomalyDetectionDefaultMinimumStablePeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.security_profile.anomaly_detection.default_minimum_stable_period"), - AnomalyDetectionMinimumStablePeriods: parseEventTypeDurations(coreconfig.SystemProbe(), "runtime_security_config.security_profile.anomaly_detection.minimum_stable_period"), - AnomalyDetectionWorkloadWarmupPeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.security_profile.anomaly_detection.workload_warmup_period"), - AnomalyDetectionUnstableProfileTimeThreshold: coreconfig.SystemProbe().GetDuration("runtime_security_config.security_profile.anomaly_detection.unstable_profile_time_threshold"), - AnomalyDetectionUnstableProfileSizeThreshold: coreconfig.SystemProbe().GetInt64("runtime_security_config.security_profile.anomaly_detection.unstable_profile_size_threshold"), - AnomalyDetectionRateLimiterPeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.security_profile.anomaly_detection.rate_limiter.period"), - AnomalyDetectionRateLimiterNumKeys: 
coreconfig.SystemProbe().GetInt("runtime_security_config.security_profile.anomaly_detection.rate_limiter.num_keys"), - AnomalyDetectionRateLimiterNumEventsAllowed: coreconfig.SystemProbe().GetInt("runtime_security_config.security_profile.anomaly_detection.rate_limiter.num_events_allowed"), - AnomalyDetectionTagRulesEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.security_profile.anomaly_detection.tag_rules.enabled"), - AnomalyDetectionSilentRuleEventsEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.security_profile.anomaly_detection.silent_rule_events.enabled"), - AnomalyDetectionEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.security_profile.anomaly_detection.enabled"), + AnomalyDetectionEventTypes: parseEventTypeStringSlice(pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.security_profile.anomaly_detection.event_types")), + AnomalyDetectionDefaultMinimumStablePeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.security_profile.anomaly_detection.default_minimum_stable_period"), + AnomalyDetectionMinimumStablePeriods: parseEventTypeDurations(pkgconfigsetup.SystemProbe(), "runtime_security_config.security_profile.anomaly_detection.minimum_stable_period"), + AnomalyDetectionWorkloadWarmupPeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.security_profile.anomaly_detection.workload_warmup_period"), + AnomalyDetectionUnstableProfileTimeThreshold: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.security_profile.anomaly_detection.unstable_profile_time_threshold"), + AnomalyDetectionUnstableProfileSizeThreshold: pkgconfigsetup.SystemProbe().GetInt64("runtime_security_config.security_profile.anomaly_detection.unstable_profile_size_threshold"), + AnomalyDetectionRateLimiterPeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.security_profile.anomaly_detection.rate_limiter.period"), + AnomalyDetectionRateLimiterNumKeys: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.security_profile.anomaly_detection.rate_limiter.num_keys"), + AnomalyDetectionRateLimiterNumEventsAllowed: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.security_profile.anomaly_detection.rate_limiter.num_events_allowed"), + AnomalyDetectionTagRulesEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.security_profile.anomaly_detection.tag_rules.enabled"), + AnomalyDetectionSilentRuleEventsEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.security_profile.anomaly_detection.silent_rule_events.enabled"), + AnomalyDetectionEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.security_profile.anomaly_detection.enabled"), // enforcement - EnforcementEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.enabled"), - EnforcementBinaryExcluded: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.enforcement.exclude_binaries"), - EnforcementRawSyscallEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.raw_syscall.enabled"), - EnforcementRuleSourceAllowed: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.enforcement.rule_source_allowed"), - EnforcementDisarmerContainerEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.disarmer.container.enabled"), - EnforcementDisarmerContainerMaxAllowed: coreconfig.SystemProbe().GetInt("runtime_security_config.enforcement.disarmer.container.max_allowed"), - 
EnforcementDisarmerContainerPeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.enforcement.disarmer.container.period"), - EnforcementDisarmerExecutableEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.disarmer.executable.enabled"), - EnforcementDisarmerExecutableMaxAllowed: coreconfig.SystemProbe().GetInt("runtime_security_config.enforcement.disarmer.executable.max_allowed"), - EnforcementDisarmerExecutablePeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.enforcement.disarmer.executable.period"), + EnforcementEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.enforcement.enabled"), + EnforcementBinaryExcluded: pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.enforcement.exclude_binaries"), + EnforcementRawSyscallEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.enforcement.raw_syscall.enabled"), + EnforcementRuleSourceAllowed: pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.enforcement.rule_source_allowed"), + EnforcementDisarmerContainerEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.enforcement.disarmer.container.enabled"), + EnforcementDisarmerContainerMaxAllowed: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.enforcement.disarmer.container.max_allowed"), + EnforcementDisarmerContainerPeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.enforcement.disarmer.container.period"), + EnforcementDisarmerExecutableEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.enforcement.disarmer.executable.enabled"), + EnforcementDisarmerExecutableMaxAllowed: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.enforcement.disarmer.executable.max_allowed"), + EnforcementDisarmerExecutablePeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.enforcement.disarmer.executable.period"), // User Sessions - UserSessionsCacheSize: coreconfig.SystemProbe().GetInt("runtime_security_config.user_sessions.cache_size"), + UserSessionsCacheSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.user_sessions.cache_size"), // ebpf less - EBPFLessEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.ebpfless.enabled"), - EBPFLessSocket: coreconfig.SystemProbe().GetString("runtime_security_config.ebpfless.socket"), + EBPFLessEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.ebpfless.enabled"), + EBPFLessSocket: pkgconfigsetup.SystemProbe().GetString("runtime_security_config.ebpfless.socket"), // IMDS IMDSIPv4: parseIMDSIPv4(), @@ -468,7 +466,7 @@ func (c *RuntimeSecurityConfig) IsRuntimeEnabled() bool { // parseIMDSIPv4 returns the uint32 representation of the IMDS IP set by the configuration func parseIMDSIPv4() uint32 { - ip := coreconfig.SystemProbe().GetString("runtime_security_config.imds_ipv4") + ip := pkgconfigsetup.SystemProbe().GetString("runtime_security_config.imds_ipv4") parsedIP := net.ParseIP(ip) if parsedIP == nil { return 0 @@ -479,13 +477,13 @@ func parseIMDSIPv4() uint32 { // If RC is globally enabled, RC is enabled for CWS, unless the CWS-specific RC value is explicitly set to false func isRemoteConfigEnabled() bool { // This value defaults to true - rcEnabledInSysprobeConfig := coreconfig.SystemProbe().GetBool("runtime_security_config.remote_configuration.enabled") + rcEnabledInSysprobeConfig := pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.remote_configuration.enabled") if 
!rcEnabledInSysprobeConfig { return false } - if coreconfig.IsRemoteConfigEnabled(coreconfig.Datadog()) { + if pkgconfigsetup.IsRemoteConfigEnabled(pkgconfigsetup.Datadog()) { return true } @@ -502,13 +500,13 @@ func (c *RuntimeSecurityConfig) GetAnomalyDetectionMinimumStablePeriod(eventType // sanitize ensures that the configuration is properly setup func (c *RuntimeSecurityConfig) sanitize() error { - serviceName := utils.GetTagValue("service", configUtils.GetConfiguredTags(coreconfig.Datadog(), true)) + serviceName := utils.GetTagValue("service", configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), true)) if len(serviceName) > 0 { c.HostServiceName = serviceName } if c.IMDSIPv4 == 0 { - return fmt.Errorf("invalid IPv4 address: got %v", coreconfig.SystemProbe().GetString("runtime_security_config.imds_ipv4")) + return fmt.Errorf("invalid IPv4 address: got %v", pkgconfigsetup.SystemProbe().GetString("runtime_security_config.imds_ipv4")) } if c.EnforcementDisarmerContainerEnabled && c.EnforcementDisarmerContainerMaxAllowed <= 0 { @@ -519,6 +517,8 @@ func (c *RuntimeSecurityConfig) sanitize() error { return fmt.Errorf("invalid value for runtime_security_config.enforcement.disarmer.executable.max_allowed: %d", c.EnforcementDisarmerExecutableMaxAllowed) } + c.sanitizePlatform() + return c.sanitizeRuntimeSecurityConfigActivityDump() } @@ -535,7 +535,7 @@ func (c *RuntimeSecurityConfig) sanitizeRuntimeSecurityConfigActivityDump() erro c.ActivityDumpTracedEventTypes = append(c.ActivityDumpTracedEventTypes, model.ExecEventType) } - if formats := coreconfig.SystemProbe().GetStringSlice("runtime_security_config.activity_dump.local_storage.formats"); len(formats) > 0 { + if formats := pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.activity_dump.local_storage.formats"); len(formats) > 0 { var err error c.ActivityDumpLocalStorageFormats, err = ParseStorageFormats(formats) if err != nil { @@ -556,13 +556,13 @@ func (c *RuntimeSecurityConfig) sanitizeRuntimeSecurityConfigActivityDump() erro // ActivityDumpRemoteStorageEndpoints returns the list of activity dump remote storage endpoints parsed from the agent config func ActivityDumpRemoteStorageEndpoints(endpointPrefix string, intakeTrackType logsconfig.IntakeTrackType, intakeProtocol logsconfig.IntakeProtocol, intakeOrigin logsconfig.IntakeOrigin) (*logsconfig.Endpoints, error) { - logsConfig := logsconfig.NewLogsConfigKeys("runtime_security_config.activity_dump.remote_storage.endpoints.", coreconfig.Datadog()) - endpoints, err := logsconfig.BuildHTTPEndpointsWithConfig(coreconfig.Datadog(), logsConfig, endpointPrefix, intakeTrackType, intakeProtocol, intakeOrigin) + logsConfig := logsconfig.NewLogsConfigKeys("runtime_security_config.activity_dump.remote_storage.endpoints.", pkgconfigsetup.Datadog()) + endpoints, err := logsconfig.BuildHTTPEndpointsWithConfig(pkgconfigsetup.Datadog(), logsConfig, endpointPrefix, intakeTrackType, intakeProtocol, intakeOrigin) if err != nil { - endpoints, err = logsconfig.BuildHTTPEndpoints(coreconfig.Datadog(), intakeTrackType, intakeProtocol, intakeOrigin) + endpoints, err = logsconfig.BuildHTTPEndpoints(pkgconfigsetup.Datadog(), intakeTrackType, intakeProtocol, intakeOrigin) if err == nil { - httpConnectivity := logshttp.CheckConnectivity(endpoints.Main, coreconfig.Datadog()) - endpoints, err = logsconfig.BuildEndpoints(coreconfig.Datadog(), httpConnectivity, intakeTrackType, intakeProtocol, intakeOrigin) + httpConnectivity := logshttp.CheckConnectivity(endpoints.Main, pkgconfigsetup.Datadog()) 
+ endpoints, err = logsconfig.BuildEndpoints(pkgconfigsetup.Datadog(), httpConnectivity, intakeTrackType, intakeProtocol, intakeOrigin) } } @@ -589,7 +589,7 @@ func ParseEvalEventType(eventType eval.EventType) model.EventType { } // parseEventTypeDurations converts a map of durations indexed by event types -func parseEventTypeDurations(cfg coreconfig.Config, prefix string) map[model.EventType]time.Duration { +func parseEventTypeDurations(cfg pkgconfigmodel.Config, prefix string) map[model.EventType]time.Duration { eventTypeMap := cfg.GetStringMap(prefix) eventTypeDurations := make(map[model.EventType]time.Duration, len(eventTypeMap)) for eventType := range eventTypeMap { diff --git a/pkg/security/config/config_linux.go b/pkg/security/config/config_linux.go new file mode 100644 index 0000000000000..02062f9def762 --- /dev/null +++ b/pkg/security/config/config_linux.go @@ -0,0 +1,15 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package config holds config related files +package config + +func (c *RuntimeSecurityConfig) sanitizePlatform() { + // Force the disable of features unavailable on EBPFLess + if c.EBPFLessEnabled { + c.ActivityDumpEnabled = false + c.SecurityProfileEnabled = false + } +} diff --git a/pkg/config/test_helpers.go b/pkg/security/config/config_others.go similarity index 51% rename from pkg/config/test_helpers.go rename to pkg/security/config/config_others.go index 6642302554da5..91da3e31277fb 100644 --- a/pkg/config/test_helpers.go +++ b/pkg/security/config/config_others.go @@ -3,17 +3,13 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
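The sanitize step above now ends with a call to the new platform hook, and the two files that follow split its implementation by build tag: the Linux variant only turns activity dumps and security profiles off when the ebpfless backend is in use, while the !linux variant (next hunk) disables them unconditionally. A minimal, self-contained Go sketch of that pattern; featureConfig and the function names are illustrative stand-ins, not the real RuntimeSecurityConfig:

package main

import "fmt"

// featureConfig is an illustrative stand-in for the two feature switches
// that sanitizePlatform touches; it is not the real RuntimeSecurityConfig.
type featureConfig struct {
	EBPFLessEnabled        bool
	ActivityDumpEnabled    bool
	SecurityProfileEnabled bool
}

// sanitizePlatformLinux follows config_linux.go: the features stay on
// unless the ebpfless backend is selected, which cannot support them.
func sanitizePlatformLinux(c *featureConfig) {
	if c.EBPFLessEnabled {
		c.ActivityDumpEnabled = false
		c.SecurityProfileEnabled = false
	}
}

// sanitizePlatformOthers follows config_others.go (built with !linux):
// the features are never available, so they are always forced off.
func sanitizePlatformOthers(c *featureConfig) {
	c.ActivityDumpEnabled = false
	c.SecurityProfileEnabled = false
}

func main() {
	c := featureConfig{ActivityDumpEnabled: true, SecurityProfileEnabled: true}
	sanitizePlatformLinux(&c)
	fmt.Printf("ebpf backend:     %+v\n", c) // both features still enabled

	c.EBPFLessEnabled = true
	sanitizePlatformLinux(&c)
	fmt.Printf("ebpfless backend: %+v\n", c) // both forced off

	sanitizePlatformOthers(&c) // non-Linux: unconditionally off
}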
-//go:build test +//go:build !linux +// Package config holds config related files package config -import ( - "github.com/DataDog/datadog-agent/pkg/config/env" -) - -var ( - // SetFeatures is alias from env - SetFeatures = env.SetFeatures - // SetFeaturesNoCleanup is alias from env - SetFeaturesNoCleanup = env.SetFeaturesNoCleanup -) +func (c *RuntimeSecurityConfig) sanitizePlatform() { + // Force the disable of features unavailable on Windows + c.ActivityDumpEnabled = false + c.SecurityProfileEnabled = false +} diff --git a/pkg/security/ebpf/c/include/constants/custom.h b/pkg/security/ebpf/c/include/constants/custom.h index 181460e586ce1..b257e0f342031 100644 --- a/pkg/security/ebpf/c/include/constants/custom.h +++ b/pkg/security/ebpf/c/include/constants/custom.h @@ -12,15 +12,6 @@ #define MAX_PATH_LEN 256 #define REVISION_ARRAY_SIZE 4096 #define INODE_DISCARDER_TYPE 0 -#define BASENAME_APPROVER_TYPE 0 -#define FLAG_APPROVER_TYPE 1 - -enum MONITOR_KEYS -{ - ERPC_MONITOR_KEY = 1, - DISCARDER_MONITOR_KEY, - APPROVER_MONITOR_KEY, -}; #define PATH_ID_MAP_SIZE 512 diff --git a/pkg/security/ebpf/c/include/constants/enums.h b/pkg/security/ebpf/c/include/constants/enums.h index 859a3230465c1..ad37ad7885112 100644 --- a/pkg/security/ebpf/c/include/constants/enums.h +++ b/pkg/security/ebpf/c/include/constants/enums.h @@ -87,28 +87,35 @@ enum enum policy_mode { NO_FILTER = 0, - ACCEPT = 1, - DENY = 2, + ACCEPT, + DENY, }; -enum policy_flags +enum APPROVER_TYPE { - BASENAME = 1, - FLAGS = 2, - MODE = 4, - PARENT_NAME = 8, + BASENAME_APPROVER_TYPE = 0, + FLAG_APPROVER_TYPE, + AUID_APPROVER_TYPE, }; -enum tls_format +enum SYSCALL_STATE { - DEFAULT_TLS_FORMAT + ACCEPTED = 0, // approved and can't be discarded later + APPROVED, // approved but can be discarded later + DISCARDED, // discarded +}; + +enum MONITOR_KEYS +{ + ERPC_MONITOR_KEY = 1, + DISCARDER_MONITOR_KEY, + APPROVER_MONITOR_KEY, }; -typedef enum discard_check_state +enum tls_format { - NOT_DISCARDED, - DISCARDED, -} discard_check_state; + DEFAULT_TLS_FORMAT +}; enum bpf_cmd_def { diff --git a/pkg/security/ebpf/c/include/events_definition.h b/pkg/security/ebpf/c/include/events_definition.h index bc934370048bf..b7c8195c35dbe 100644 --- a/pkg/security/ebpf/c/include/events_definition.h +++ b/pkg/security/ebpf/c/include/events_definition.h @@ -112,10 +112,9 @@ struct cgroup_tracing_event_t { struct cgroup_write_event_t { struct kevent_t event; - struct process_context_t process; - struct span_context_t span; - struct container_context_t container; struct file_t file; + u32 pid; // pid of the process added to the cgroup + u32 cgroup_flags; }; struct utimes_event_t { diff --git a/pkg/security/ebpf/c/include/helpers/approvers.h b/pkg/security/ebpf/c/include/helpers/approvers.h index 02c10694c7a19..d3b68a4d9282f 100644 --- a/pkg/security/ebpf/c/include/helpers/approvers.h +++ b/pkg/security/ebpf/c/include/helpers/approvers.h @@ -20,33 +20,65 @@ void __attribute__((always_inline)) monitor_event_approved(u64 event_type, u32 a __sync_fetch_and_add(&stats->event_approved_by_basename, 1); } else if (approver_type == FLAG_APPROVER_TYPE) { __sync_fetch_and_add(&stats->event_approved_by_flag, 1); + } else if (approver_type == AUID_APPROVER_TYPE) { + __sync_fetch_and_add(&stats->event_approved_by_auid, 1); } } void get_dentry_name(struct dentry *dentry, void *buffer, size_t n); -int __attribute__((always_inline)) approve_by_basename(struct dentry *dentry, u64 event_type) { +enum SYSCALL_STATE __attribute__((always_inline)) approve_by_auid(struct 
syscall_cache_t *syscall, u64 event_type) { + u32 pid = bpf_get_current_pid_tgid() >> 32; + struct pid_cache_t *pid_entry = (struct pid_cache_t *)bpf_map_lookup_elem(&pid_cache, &pid); + if (!pid_entry || !pid_entry->credentials.is_auid_set) { + return DISCARDED; + } + + u32 auid = pid_entry->credentials.auid; + + struct event_mask_filter_t *mask_filter = bpf_map_lookup_elem(&auid_approvers, &auid); + if (mask_filter && mask_filter->event_mask & (1 << (event_type - 1))) { + monitor_event_approved(syscall->type, AUID_APPROVER_TYPE); + return ACCEPTED; + } + + struct u32_range_filter_t *range_filter = bpf_map_lookup_elem(&auid_range_approvers, &event_type); + if (range_filter && auid >= range_filter->min && auid <= range_filter->max) { + monitor_event_approved(syscall->type, AUID_APPROVER_TYPE); + return ACCEPTED; + } + + return DISCARDED; +} + +enum SYSCALL_STATE __attribute__((always_inline)) approve_by_basename(struct dentry *dentry, u64 event_type) { struct basename_t basename = {}; get_dentry_name(dentry, &basename, sizeof(basename)); - struct basename_filter_t *filter = bpf_map_lookup_elem(&basename_approvers, &basename); + struct event_mask_filter_t *filter = bpf_map_lookup_elem(&basename_approvers, &basename); if (filter && filter->event_mask & (1 << (event_type - 1))) { monitor_event_approved(event_type, BASENAME_APPROVER_TYPE); - return 1; + return APPROVED; } - return 0; + return DISCARDED; } -int __attribute__((always_inline)) basename_approver(struct syscall_cache_t *syscall, struct dentry *dentry, u64 event_type) { - return approve_by_basename(dentry, event_type); -} +enum SYSCALL_STATE __attribute__((always_inline)) chmod_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->setattr.dentry, EVENT_CHMOD); + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_CHMOD); + } -int __attribute__((always_inline)) chmod_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->setattr.dentry, EVENT_CHMOD); + return state; } -int __attribute__((always_inline)) chown_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->setattr.dentry, EVENT_CHOWN); +enum SYSCALL_STATE __attribute__((always_inline)) chown_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->setattr.dentry, EVENT_CHOWN); + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_CHOWN); + } + + return state; } int __attribute__((always_inline)) lookup_u32_flags(void *map, u32 *flags) { @@ -65,106 +97,120 @@ int __attribute__((always_inline)) approve_mmap_by_flags(struct syscall_cache_t int exists = lookup_u32_flags(&mmap_flags_approvers, &flags); if (!exists) { - return 0; + return DISCARDED; } if ((syscall->mmap.flags & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); - return 1; + return APPROVED; } - return 0; + return DISCARDED; } -int __attribute__((always_inline)) approve_mmap_by_protection(struct syscall_cache_t *syscall) { +enum SYSCALL_STATE __attribute__((always_inline)) approve_mmap_by_protection_flags(struct syscall_cache_t *syscall) { u32 flags = 0; int exists = lookup_u32_flags(&mmap_protection_approvers, &flags); if (!exists) { - return 0; + return DISCARDED; } if ((flags == 0 && syscall->mmap.protection == 0) || (syscall->mmap.protection & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); - return 1; + return APPROVED; } - return 0; + return DISCARDED; } -int 
__attribute__((always_inline)) mmap_approvers(struct syscall_cache_t *syscall) { - int pass_to_userspace = 0; +enum SYSCALL_STATE __attribute__((always_inline)) mmap_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = DISCARDED; if (syscall->mmap.dentry != NULL) { - pass_to_userspace = approve_by_basename(syscall->mmap.dentry, EVENT_MMAP); + state = approve_by_basename(syscall->mmap.dentry, EVENT_MMAP); } - if (!pass_to_userspace) { - pass_to_userspace = approve_mmap_by_protection(syscall); - if (!pass_to_userspace) { - pass_to_userspace = approve_mmap_by_flags(syscall); - } + if (state == DISCARDED) { + state = approve_mmap_by_protection_flags(syscall); + } + if (state == DISCARDED) { + state = approve_mmap_by_flags(syscall); } - return pass_to_userspace; + return state; } -int __attribute__((always_inline)) link_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->link.src_dentry, EVENT_LINK) || - basename_approver(syscall, syscall->link.target_dentry, EVENT_LINK); +enum SYSCALL_STATE __attribute__((always_inline)) link_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->link.src_dentry, EVENT_LINK); + if (state == DISCARDED) { + state = approve_by_basename(syscall->link.target_dentry, EVENT_LINK); + } + + return state; } -int __attribute__((always_inline)) mkdir_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->mkdir.dentry, EVENT_MKDIR); +enum SYSCALL_STATE __attribute__((always_inline)) mkdir_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->mkdir.dentry, EVENT_MKDIR); + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_MKDIR); + } + + return state; } -int __attribute__((always_inline)) chdir_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->chdir.dentry, EVENT_CHDIR); +enum SYSCALL_STATE __attribute__((always_inline)) chdir_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->chdir.dentry, EVENT_CHDIR); + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_CHDIR); + } + + return state; } -int __attribute__((always_inline)) approve_mprotect_by_vm_protection(struct syscall_cache_t *syscall) { +enum SYSCALL_STATE __attribute__((always_inline)) approve_mprotect_by_vm_protection(struct syscall_cache_t *syscall) { u32 flags = 0; int exists = lookup_u32_flags(&mprotect_vm_protection_approvers, &flags); if (!exists) { - return 0; + return DISCARDED; } if ((syscall->mprotect.vm_protection & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); - return 1; + return APPROVED; } - return 0; + return DISCARDED; } -int __attribute__((always_inline)) approve_mprotect_by_req_protection(struct syscall_cache_t *syscall) { +enum SYSCALL_STATE __attribute__((always_inline)) approve_mprotect_by_req_protection(struct syscall_cache_t *syscall) { u32 flags = 0; int exists = lookup_u32_flags(&mprotect_req_protection_approvers, &flags); if (!exists) { - return 0; + return DISCARDED; } if ((syscall->mprotect.req_protection & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); - return 1; + return APPROVED; } - return 0; + return DISCARDED; } -int __attribute__((always_inline)) mprotect_approvers(struct syscall_cache_t *syscall) { - int pass_to_userspace = approve_mprotect_by_vm_protection(syscall); - if (!pass_to_userspace) { - pass_to_userspace = 
approve_mprotect_by_req_protection(syscall); +enum SYSCALL_STATE __attribute__((always_inline)) mprotect_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_mprotect_by_vm_protection(syscall); + if (state == DISCARDED) { + state = approve_mprotect_by_req_protection(syscall); } - return pass_to_userspace; + return state; } -int __attribute__((always_inline)) approve_by_flags(struct syscall_cache_t *syscall) { +enum SYSCALL_STATE __attribute__((always_inline)) approve_open_by_flags(struct syscall_cache_t *syscall) { u32 flags = 0; int exists = lookup_u32_flags(&open_flags_approvers, &flags); if (!exists) { - return 0; + return DISCARDED; } if ((flags == 0 && syscall->open.flags == 0) || ((syscall->open.flags & flags) > 0)) { @@ -174,97 +220,153 @@ int __attribute__((always_inline)) approve_by_flags(struct syscall_cache_t *sysc bpf_printk("open flags %d approved", syscall->open.flags); #endif - return 1; + return APPROVED; } - return 0; + return DISCARDED; } -int __attribute__((always_inline)) open_approvers(struct syscall_cache_t *syscall) { - int pass_to_userspace = approve_by_basename(syscall->open.dentry, EVENT_OPEN); - if (!pass_to_userspace) { - pass_to_userspace = approve_by_flags(syscall); +enum SYSCALL_STATE __attribute__((always_inline)) open_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->open.dentry, EVENT_OPEN); + if (state == DISCARDED) { + state = approve_open_by_flags(syscall); + } + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_OPEN); } - return pass_to_userspace; + return state; } -int __attribute__((always_inline)) rename_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->rename.src_dentry, EVENT_RENAME) || - basename_approver(syscall, syscall->rename.target_dentry, EVENT_RENAME); +enum SYSCALL_STATE __attribute__((always_inline)) rename_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->rename.src_dentry, EVENT_RENAME); + if (state == DISCARDED) { + state = approve_by_basename(syscall->rename.target_dentry, EVENT_RENAME); + } + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_RENAME); + } + + return state; } -int __attribute__((always_inline)) rmdir_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->rmdir.dentry, EVENT_RMDIR); +enum SYSCALL_STATE __attribute__((always_inline)) rmdir_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->rmdir.dentry, EVENT_RMDIR); + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_RMDIR); + } + return state; } -int __attribute__((always_inline)) approve_splice_by_entry_flags(struct syscall_cache_t *syscall) { +enum SYSCALL_STATE __attribute__((always_inline)) approve_splice_by_entry_flags(struct syscall_cache_t *syscall) { u32 flags = 0; int exists = lookup_u32_flags(&splice_entry_flags_approvers, &flags); if (!exists) { - return 0; + return DISCARDED; } if ((syscall->splice.pipe_entry_flag & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); - return 1; + return APPROVED; } - return 0; + return DISCARDED; } -int __attribute__((always_inline)) approve_splice_by_exit_flags(struct syscall_cache_t *syscall) { +enum SYSCALL_STATE __attribute__((always_inline)) approve_splice_by_exit_flags(struct syscall_cache_t *syscall) { u32 flags = 0; int exists = lookup_u32_flags(&splice_exit_flags_approvers, &flags); if 
(!exists) { - return 0; + return DISCARDED; } if ((syscall->splice.pipe_exit_flag & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); - return 1; + return APPROVED; } - return 0; + return DISCARDED; } -int __attribute__((always_inline)) splice_approvers(struct syscall_cache_t *syscall) { - int pass_to_userspace = 0; +enum SYSCALL_STATE __attribute__((always_inline)) splice_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = DISCARDED; if (syscall->splice.dentry != NULL) { - pass_to_userspace = approve_by_basename(syscall->splice.dentry, EVENT_SPLICE); + state = approve_by_basename(syscall->splice.dentry, EVENT_SPLICE); } - if (!pass_to_userspace) { - pass_to_userspace = approve_splice_by_exit_flags(syscall); - if (!pass_to_userspace) { - pass_to_userspace = approve_splice_by_entry_flags(syscall); - } + if (state == DISCARDED) { + state = approve_splice_by_exit_flags(syscall); + } + if (state == DISCARDED) { + state = approve_splice_by_entry_flags(syscall); } - return pass_to_userspace; + return state; } -int __attribute__((always_inline)) unlink_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->unlink.dentry, EVENT_UNLINK); +enum SYSCALL_STATE __attribute__((always_inline)) unlink_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->unlink.dentry, EVENT_UNLINK); + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_UNLINK); + } + return state; } -int __attribute__((always_inline)) utime_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->setattr.dentry, EVENT_UTIME); +enum SYSCALL_STATE __attribute__((always_inline)) utime_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->setattr.dentry, EVENT_UTIME); + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_UTIME); + } + return state; } -int __attribute__((always_inline)) bpf_approvers(struct syscall_cache_t *syscall) { +enum SYSCALL_STATE __attribute__((always_inline)) bpf_approvers(struct syscall_cache_t *syscall) { u32 key = 0; struct u64_flags_filter_t *filter = bpf_map_lookup_elem(&bpf_cmd_approvers, &key); if (filter == NULL || !filter->is_set) { - return 0; + return DISCARDED; } if (((1 << syscall->bpf.cmd) & filter->flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); - return 1; + return APPROVED; + } + + return DISCARDED; +} + +enum SYSCALL_STATE __attribute__((always_inline)) approve_syscall(struct syscall_cache_t *syscall, enum SYSCALL_STATE (*check_approvers)(struct syscall_cache_t *syscall)) { + if (syscall->policy.mode == NO_FILTER) { + return syscall->state = ACCEPTED; + } + + if (syscall->policy.mode == ACCEPT) { + return syscall->state = APPROVED; + } + + if (syscall->policy.mode == DENY) { + syscall->state = check_approvers(syscall); + } + + u32 tgid = bpf_get_current_pid_tgid() >> 32; + u64 *cookie = bpf_map_lookup_elem(&traced_pids, &tgid); + if (cookie != NULL) { + u64 now = bpf_ktime_get_ns(); + struct activity_dump_config *config = lookup_or_delete_traced_pid(tgid, now, cookie); + if (config != NULL) { + // is this event type traced ? 
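The helper being defined here, approve_syscall (completed just below), replaces the filter_syscall / mark_as_discarded pair with an explicit three-state outcome: a NO_FILTER policy short-circuits to ACCEPTED, an ACCEPT policy returns APPROVED, and a DENY policy runs the per-event approver chain; on the DENY path, a traced pid whose activity dump still wants the event type is then upgraded to ACCEPTED so the event is kept for the dump. A rough Go model of that decision flow; the types and the approver callback are illustrative, not the eBPF structures:

package main

import "fmt"

type syscallState int

const (
	accepted  syscallState = iota // approved and cannot be discarded later
	approved                      // approved but may still be discarded
	discarded                     // discarded
)

type policyMode int

const (
	noFilter policyMode = iota
	accept
	deny
)

type syscallCtx struct {
	mode         policyMode
	tracedByDump bool // pid is traced and the event type is in the dump mask
	state        syscallState
}

// approveSyscall models the flow of approve_syscall: policy first, then the
// event-specific approvers, then the activity-dump override on the DENY path.
func approveSyscall(s *syscallCtx, checkApprovers func(*syscallCtx) syscallState) syscallState {
	switch s.mode {
	case noFilter:
		s.state = accepted
		return s.state
	case accept:
		s.state = approved
		return s.state
	case deny:
		s.state = checkApprovers(s)
	}

	// A traced pid keeps the event for its activity dump even when the
	// approvers said DISCARDED.
	if s.tracedByDump {
		s.state = accepted
	}
	return s.state
}

func main() {
	denyAll := func(*syscallCtx) syscallState { return discarded }
	s := &syscallCtx{mode: deny, tracedByDump: true}
	fmt.Println(approveSyscall(s, denyAll) == accepted) // true: saved by the dump
}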
+ if (mask_has_event(config->event_mask, syscall->type) && activity_dump_rate_limiter_allow(config, *cookie, now, 0)) { + if (syscall->state == DISCARDED) { + syscall->resolver.flags |= SAVED_BY_ACTIVITY_DUMP; + } + + // force to be accepted as this event will be part of a dump + syscall->state = ACCEPTED; + } + } } - return 0; + return syscall->state; } #endif diff --git a/pkg/security/ebpf/c/include/helpers/discarders.h b/pkg/security/ebpf/c/include/helpers/discarders.h index 86bc5d3a1c96d..873356c61e9b3 100644 --- a/pkg/security/ebpf/c/include/helpers/discarders.h +++ b/pkg/security/ebpf/c/include/helpers/discarders.h @@ -222,25 +222,25 @@ int __attribute__((always_inline)) discard_inode(u64 event_type, u32 mount_id, u return 0; } -discard_check_state __attribute__((always_inline)) is_discarded_by_inode(struct is_discarded_by_inode_t *params) { +int __attribute__((always_inline)) is_discarded_by_inode(struct is_discarded_by_inode_t *params) { // start with the "normal" discarder check struct inode_discarder_t key = params->discarder; - struct inode_discarder_params_t *inode_params = (struct inode_discarder_params_t *)is_discarded(&inode_discarders, &key, params->discarder_type, params->now); + struct inode_discarder_params_t *inode_params = (struct inode_discarder_params_t *)is_discarded(&inode_discarders, &key, params->event_type, params->now); if (!inode_params) { - return NOT_DISCARDED; + return 0; } bool are_revisions_equal = inode_params->mount_revision == get_mount_discarder_revision(params->discarder.path_key.mount_id); if (!are_revisions_equal) { - return NOT_DISCARDED; + return 0; } u32 revision = get_discarders_revision(); if (inode_params->params.revision != revision) { - return NOT_DISCARDED; + return 0; } - return DISCARDED; + return 1; } int __attribute__((always_inline)) expire_inode_discarders(u32 mount_id, u64 inode) { @@ -283,4 +283,16 @@ int __attribute__((always_inline)) expire_inode_discarders(u32 mount_id, u64 ino return 0; } +static __attribute__((always_inline)) int is_discarded_by_pid() { + return is_runtime_discarded() && is_runtime_request(); +} + +int __attribute__((always_inline)) dentry_resolver_discarder_event_type(struct syscall_cache_t *syscall) { + if (syscall->state == ACCEPTED) { + return 0; + } + + return syscall->type; +} + #endif diff --git a/pkg/security/ebpf/c/include/helpers/exec.h b/pkg/security/ebpf/c/include/helpers/exec.h index 7c5d5ee02170f..99cc6fed57093 100644 --- a/pkg/security/ebpf/c/include/helpers/exec.h +++ b/pkg/security/ebpf/c/include/helpers/exec.h @@ -26,7 +26,7 @@ int __attribute__((always_inline)) handle_exec_event(ctx_t *ctx, struct syscall_ // resolve dentry syscall->resolver.key = syscall->exec.file.path_key; syscall->resolver.dentry = syscall->exec.dentry; - syscall->resolver.discarder_type = 0; + syscall->resolver.discarder_event_type = 0; syscall->resolver.callback = DR_NO_CALLBACK; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/helpers/syscalls.h b/pkg/security/ebpf/c/include/helpers/syscalls.h index e4c41ccd5e8c2..8de9cdf42ff43 100644 --- a/pkg/security/ebpf/c/include/helpers/syscalls.h +++ b/pkg/security/ebpf/c/include/helpers/syscalls.h @@ -192,48 +192,6 @@ struct syscall_cache_t *__attribute__((always_inline)) pop_syscall(u64 type) { return syscall; } -int __attribute__((always_inline)) discard_syscall(struct syscall_cache_t *syscall) { - u64 key = bpf_get_current_pid_tgid(); - bpf_map_delete_elem(&syscalls, &key); - monitor_syscalls(syscall->type, -1); - return 
0; -} - -int __attribute__((always_inline)) mark_as_discarded(struct syscall_cache_t *syscall) { - syscall->discarded = 1; - return 0; -} - -int __attribute__((always_inline)) filter_syscall(struct syscall_cache_t *syscall, int (*check_approvers)(struct syscall_cache_t *syscall)) { - if (syscall->policy.mode == NO_FILTER) { - return 0; - } - - char pass_to_userspace = syscall->policy.mode == ACCEPT ? 1 : 0; - - if (syscall->policy.mode == DENY) { - pass_to_userspace = check_approvers(syscall); - } - - u32 tgid = bpf_get_current_pid_tgid() >> 32; - u64 *cookie = bpf_map_lookup_elem(&traced_pids, &tgid); - if (cookie != NULL) { - u64 now = bpf_ktime_get_ns(); - struct activity_dump_config *config = lookup_or_delete_traced_pid(tgid, now, cookie); - if (config != NULL) { - // is this event type traced ? - if (mask_has_event(config->event_mask, syscall->type) && activity_dump_rate_limiter_allow(config, *cookie, now, 0)) { - if (!pass_to_userspace) { - syscall->resolver.flags |= SAVED_BY_ACTIVITY_DUMP; - } - return 0; - } - } - } - - return !pass_to_userspace; -} - // the following functions must use the {peek,pop}_current_or_impersonated_exec_syscall to retrieve the syscall context // because the task performing the exec syscall may change its pid in the flush_old_exec() kernel function diff --git a/pkg/security/ebpf/c/include/hooks/bpf.h b/pkg/security/ebpf/c/include/hooks/bpf.h index 8b1c9f3bca35c..a39e587faa9a1 100644 --- a/pkg/security/ebpf/c/include/hooks/bpf.h +++ b/pkg/security/ebpf/c/include/hooks/bpf.h @@ -50,6 +50,10 @@ __attribute__((always_inline)) void send_bpf_event(void *ctx, struct syscall_cac } HOOK_SYSCALL_ENTRY3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_BPF); struct syscall_cache_t syscall = { .policy = policy, @@ -71,8 +75,8 @@ __attribute__((always_inline)) int sys_bpf_ret(void *ctx, int retval) { return 0; } - if (filter_syscall(syscall, bpf_approvers)) { - return mark_as_discarded(syscall); + if (approve_syscall(syscall, bpf_approvers) == DISCARDED) { + return 0; } syscall->bpf.retval = retval; diff --git a/pkg/security/ebpf/c/include/hooks/cgroup.h b/pkg/security/ebpf/c/include/hooks/cgroup.h index aa43cc0a333a7..a231142b8f90a 100644 --- a/pkg/security/ebpf/c/include/hooks/cgroup.h +++ b/pkg/security/ebpf/c/include/hooks/cgroup.h @@ -9,6 +9,8 @@ #include "structs/dentry_resolver.h" #include "maps.h" +#define ROOT_CGROUP_PROCS_FILE_INO 2 + static __attribute__((always_inline)) int is_docker_cgroup(ctx_t *ctx, struct dentry *container_d) { struct dentry *parent_d; struct qstr parent_qstr; @@ -47,6 +49,13 @@ static __attribute__((always_inline)) int trace__cgroup_write(ctx_t *ctx) { return 0; } + // from cgroups(7): + // Writing the value 0 to a cgroup.procs file causes the writing process to be moved to the corresponding cgroup. 
+ // in this case we want to grab the tgid of the process that wrote to the file + if (pid == 0) { + pid = bpf_get_current_pid_tgid() >> 32; + } + #ifdef DEBUG_CGROUP bpf_printk("trace__cgroup_write %d\n", pid); #endif @@ -63,7 +72,7 @@ static __attribute__((always_inline)) int trace__cgroup_write(ctx_t *ctx) { // Select the old cache entry old_entry = get_proc_from_cookie(cookie); if (old_entry) { - if (old_entry->container.container_id[0] != '\0') { + if ((old_entry->container.container_id[0] != '\0') && old_entry->container.cgroup_context.cgroup_flags && (old_entry->container.cgroup_context.cgroup_flags != CGROUP_MANAGER_SYSTEMD)) { return 0; } @@ -78,9 +87,9 @@ static __attribute__((always_inline)) int trace__cgroup_write(ctx_t *ctx) { struct dentry *container_d; struct qstr container_qstr; char *container_id; - u32 container_flags = 0; + u32 cgroup_flags = 0; - struct dentry_resolver_input_t cgroup_dentry_resolver; + struct dentry_resolver_input_t cgroup_dentry_resolver = {0}; struct dentry_resolver_input_t *resolver = &cgroup_dentry_resolver; u32 key = 0; @@ -111,7 +120,7 @@ static __attribute__((always_inline)) int trace__cgroup_write(ctx_t *ctx) { container_id = (void *)container_qstr.name; if (is_docker_cgroup(ctx, container_d)) { - container_flags = CGROUP_MANAGER_DOCKER; + cgroup_flags = CGROUP_MANAGER_DOCKER; } break; @@ -135,7 +144,7 @@ static __attribute__((always_inline)) int trace__cgroup_write(ctx_t *ctx) { resolver->dentry = container_d; if (is_docker_cgroup(ctx, container_d)) { - container_flags = CGROUP_MANAGER_DOCKER; + cgroup_flags = CGROUP_MANAGER_DOCKER; } break; @@ -145,6 +154,11 @@ static __attribute__((always_inline)) int trace__cgroup_write(ctx_t *ctx) { return 0; } + // if the process is being moved to the root cgroup then we don't want to track it + if (resolver->key.ino == ROOT_CGROUP_PROCS_FILE_INO) { + return 0; + } + if (bpf_probe_read(prefix, 15, container_id)) return 0; @@ -152,22 +166,22 @@ static __attribute__((always_inline)) int trace__cgroup_write(ctx_t *ctx) { if ((*prefix)[0] == 'd' && (*prefix)[1] == 'o' && (*prefix)[2] == 'c' && (*prefix)[3] == 'k' && (*prefix)[4] == 'e' && (*prefix)[5] == 'r' && (*prefix)[6] == '-') { container_id += 7; // skip "docker-" - container_flags = CGROUP_MANAGER_DOCKER; + cgroup_flags = CGROUP_MANAGER_DOCKER; } else if ((*prefix)[0] == 'c' && (*prefix)[1] == 'r' && (*prefix)[2] == 'i' && (*prefix)[3] == 'o' && (*prefix)[4] == '-') { container_id += 5; // skip "crio-" - container_flags = CGROUP_MANAGER_CRIO; + cgroup_flags = CGROUP_MANAGER_CRIO; } else if ((*prefix)[0] == 'l' && (*prefix)[1] == 'i' && (*prefix)[2] == 'b' && (*prefix)[3] == 'p' && (*prefix)[4] == 'o' && (*prefix)[5] == 'd' && (*prefix)[6] == '-') { container_id += 7; // skip "libpod-" - container_flags = CGROUP_MANAGER_PODMAN; + cgroup_flags = CGROUP_MANAGER_PODMAN; } else if ((*prefix)[0] == 'c' && (*prefix)[1] == 'r' && (*prefix)[2] == 'i' && (*prefix)[3] == '-' && (*prefix)[4] == 'c' && (*prefix)[5] == 'o' && (*prefix)[6] == 'n' && (*prefix)[7] == 't' && (*prefix)[8] == 'a' && (*prefix)[9] == 'i' && (*prefix)[10] == 'n' && (*prefix)[11] == 'e' && (*prefix)[12] == 'r' && (*prefix)[13] == 'd' && (*prefix)[14] == '-') { container_id += 15; // skip "cri-containerd-" - container_flags = CGROUP_MANAGER_CRI; + cgroup_flags = CGROUP_MANAGER_CRI; } #ifdef DEBUG_CGROUP @@ -175,20 +189,21 @@ static __attribute__((always_inline)) int trace__cgroup_write(ctx_t *ctx) { #endif int length = bpf_probe_read_str(prefix, sizeof(cgroup_prefix_t), container_id) & 0xff; - 
if (container_flags == 0 && ( + if (cgroup_flags == 0 && ( (length >= 9 && (*prefix)[length-9] == '.' && (*prefix)[length-8] == 's' && (*prefix)[length-7] == 'e' && (*prefix)[length-6] == 'r' && (*prefix)[length-5] == 'v' && (*prefix)[length-4] == 'i' && (*prefix)[length-3] == 'c' && (*prefix)[length-2] == 'e') || (length >= 7 && (*prefix)[length-7] == '.' && (*prefix)[length-6] == 's' && (*prefix)[length-5] == 'c' && (*prefix)[length-4] == 'o' && (*prefix)[length-3] == 'p' && (*prefix)[length-2] == 'e') )) { - container_flags = CGROUP_MANAGER_SYSTEMD; + cgroup_flags = CGROUP_MANAGER_SYSTEMD; + } else if (cgroup_flags != 0) { + bpf_probe_read(&new_entry.container.container_id, sizeof(new_entry.container.container_id), container_id); } - bpf_probe_read(&new_entry.container.container_id, sizeof(new_entry.container.container_id), container_id); - new_entry.container.cgroup_context.cgroup_flags = container_flags; + new_entry.container.cgroup_context.cgroup_flags = cgroup_flags; new_entry.container.cgroup_context.cgroup_file = resolver->key; #ifdef DEBUG_CGROUP - bpf_printk("container flags=%d, inode=%d: prefix=%s\n", container_flags, new_entry.container.cgroup_context.cgroup_file.ino, prefix); + bpf_printk("cgroup flags=%d, inode=%d: prefix=%s\n", cgroup_flags, new_entry.container.cgroup_context.cgroup_file.ino, prefix); #endif bpf_map_update_elem(&proc_cache, &cookie, &new_entry, BPF_ANY); @@ -201,12 +216,13 @@ static __attribute__((always_inline)) int trace__cgroup_write(ctx_t *ctx) { } resolver->type = EVENT_CGROUP_WRITE; - resolver->discarder_type = NO_FILTER; + resolver->discarder_event_type = 0; resolver->callback = DR_CGROUP_WRITE_CALLBACK_KPROBE_KEY; resolver->iteration = 0; resolver->ret = 0; resolver->flags = 0; - resolver->sysretval = 0; + resolver->cgroup_write_ctx.cgroup_write_pid = pid; + resolver->cgroup_write_ctx.cgroup_flags = cgroup_flags; resolver->original_key = resolver->key; cache_dentry_resolver_input(resolver); @@ -223,12 +239,10 @@ int __attribute__((always_inline)) dr_cgroup_write_callback(void *ctx) { struct cgroup_write_event_t event = { .file.path_key = inputs->original_key, + .pid = inputs->cgroup_write_ctx.cgroup_write_pid, + .cgroup_flags = inputs->cgroup_write_ctx.cgroup_flags, }; - struct proc_cache_t *entry = fill_process_context(&event.process); - fill_container_context(entry, &event.container); - fill_span_context(&event.span); - send_event(ctx, EVENT_CGROUP_WRITE, event); return 0; diff --git a/pkg/security/ebpf/c/include/hooks/chdir.h b/pkg/security/ebpf/c/include/hooks/chdir.h index b424ec706ddd1..2d1e6c5db11eb 100644 --- a/pkg/security/ebpf/c/include/hooks/chdir.h +++ b/pkg/security/ebpf/c/include/hooks/chdir.h @@ -8,6 +8,10 @@ #include "helpers/syscalls.h" long __attribute__((always_inline)) trace__sys_chdir(const char *path) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_CHDIR); struct syscall_cache_t syscall = { .type = EVENT_CHDIR, @@ -53,8 +57,9 @@ int hook_set_fs_pwd(ctx_t *ctx) { set_file_inode(dentry, &syscall->chdir.file, 0); - if (filter_syscall(syscall, chdir_approvers)) { - return mark_as_discarded(syscall); + if (approve_syscall(syscall, chdir_approvers) == DISCARDED) { + pop_syscall(EVENT_CHDIR); + return 0; } return 0; @@ -66,7 +71,7 @@ int __attribute__((always_inline)) sys_chdir_ret(void *ctx, int retval, int dr_t return 0; } if (IS_UNHANDLED_ERROR(retval)) { - discard_syscall(syscall); + pop_syscall(EVENT_CHDIR); return 0; } @@ -74,7 +79,7 @@ int __attribute__((always_inline)) 
sys_chdir_ret(void *ctx, int retval, int dr_t syscall->resolver.key = syscall->chdir.file.path_key; syscall->resolver.dentry = syscall->chdir.dentry; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? EVENT_CHDIR : 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = select_dr_key(dr_type, DR_CHDIR_CALLBACK_KPROBE_KEY, DR_CHDIR_CALLBACK_TRACEPOINT_KEY); syscall->resolver.iteration = 0; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/chmod.h b/pkg/security/ebpf/c/include/hooks/chmod.h index 8afd2f1f663ab..f374c6478cc31 100644 --- a/pkg/security/ebpf/c/include/hooks/chmod.h +++ b/pkg/security/ebpf/c/include/hooks/chmod.h @@ -7,6 +7,10 @@ #include "helpers/syscalls.h" int __attribute__((always_inline)) trace__sys_chmod(const char *path, umode_t mode) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_CHMOD); struct syscall_cache_t syscall = { .type = EVENT_CHMOD, diff --git a/pkg/security/ebpf/c/include/hooks/chown.h b/pkg/security/ebpf/c/include/hooks/chown.h index 248d366aef68c..3bb4cbf243141 100644 --- a/pkg/security/ebpf/c/include/hooks/chown.h +++ b/pkg/security/ebpf/c/include/hooks/chown.h @@ -7,6 +7,10 @@ #include "helpers/syscalls.h" int __attribute__((always_inline)) trace__sys_chown(const char *filename, uid_t user, gid_t group) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_CHOWN); struct syscall_cache_t syscall = { .type = EVENT_CHOWN, diff --git a/pkg/security/ebpf/c/include/hooks/dentry_resolver.h b/pkg/security/ebpf/c/include/hooks/dentry_resolver.h index a47ecd1050c79..1304ee7c94cc8 100644 --- a/pkg/security/ebpf/c/include/hooks/dentry_resolver.h +++ b/pkg/security/ebpf/c/include/hooks/dentry_resolver.h @@ -20,7 +20,7 @@ int __attribute__((always_inline)) resolve_dentry_tail_call(void *ctx, struct de return DENTRY_ERROR; } *params = (struct is_discarded_by_inode_t){ - .discarder_type = input->discarder_type, + .event_type = input->discarder_event_type, .now = bpf_ktime_get_ns(), }; @@ -40,7 +40,7 @@ int __attribute__((always_inline)) resolve_dentry_tail_call(void *ctx, struct de next_key.mount_id = 0; } - if (input->discarder_type && input->iteration == 1 && i <= 3) { + if (input->discarder_event_type && input->iteration == 1 && i <= 3) { params->discarder.path_key.ino = key.ino; params->discarder.path_key.mount_id = key.mount_id; params->discarder.is_leaf = i == 0; diff --git a/pkg/security/ebpf/c/include/hooks/exec.h b/pkg/security/ebpf/c/include/hooks/exec.h index c7ff9374bc2a9..a99783248c5d9 100644 --- a/pkg/security/ebpf/c/include/hooks/exec.h +++ b/pkg/security/ebpf/c/include/hooks/exec.h @@ -80,7 +80,7 @@ int __attribute__((always_inline)) handle_interpreted_exec_event(void *ctx, stru // This overwrites the resolver fields on this syscall, but that's ok because the executed file has already been written to the map/pathnames ebpf map. 
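The syscall-entry hooks touched by this change (bpf, chdir, chmod, chown above, and mprotect, mmap, open, selinux, mkdir further down) all gain the same guard: if is_discarded_by_pid() holds, the hook returns before a syscall cache entry is ever created. A small Go sketch of that gating shape; the two predicates are placeholder booleans standing in for is_runtime_discarded and is_runtime_request:

package main

import "fmt"

// Placeholder inputs for the eBPF-side helpers; both must hold for the
// event to be skipped, mirroring the shape of the new discarders.h helper.
var runtimeDiscarded = true
var runtimeRequest = true

func isDiscardedByPid() bool {
	return runtimeDiscarded && runtimeRequest
}

// syscallCache is a simplified stand-in for the per-pid syscall cache map.
type syscallCache struct{ eventType string }

var cache []syscallCache

// traceSysChmod models the entry-hook pattern: bail out early, otherwise
// push a cache entry for the exit probe to consume.
func traceSysChmod() {
	if isDiscardedByPid() {
		return // no cache entry, the exit probe will see nothing to do
	}
	cache = append(cache, syscallCache{eventType: "chmod"})
}

func main() {
	traceSysChmod()
	fmt.Println("cached entries:", len(cache)) // 0: the hook returned before caching
}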
syscall->resolver.key = syscall->exec.linux_binprm.interpreter; syscall->resolver.dentry = get_file_dentry(file); - syscall->resolver.discarder_type = 0; + syscall->resolver.discarder_event_type = 0; syscall->resolver.callback = DR_NO_CALLBACK; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/link.h b/pkg/security/ebpf/c/include/hooks/link.h index 51a4a18ee88b2..8184b16cfd4bf 100644 --- a/pkg/security/ebpf/c/include/hooks/link.h +++ b/pkg/security/ebpf/c/include/hooks/link.h @@ -69,8 +69,9 @@ int hook_vfs_link(ctx_t *ctx) { // force a new path id to force path resolution set_file_inode(src_dentry, &syscall->link.src_file, 1); - if (filter_syscall(syscall, link_approvers)) { - return mark_as_discarded(syscall); + if (approve_syscall(syscall, link_approvers) == DISCARDED) { + // do not pop, we want to invalidate the inode even if the syscall is discarded + return 0; } fill_file(src_dentry, &syscall->link.src_file); @@ -85,7 +86,7 @@ int hook_vfs_link(ctx_t *ctx) { syscall->resolver.dentry = src_dentry; syscall->resolver.key = syscall->link.src_file.path_key; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? EVENT_LINK : 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = DR_LINK_SRC_CALLBACK_KPROBE_KEY; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; @@ -107,7 +108,8 @@ int tail_call_target_dr_link_src_callback(ctx_t *ctx) { if (syscall->resolver.ret == DENTRY_DISCARDED) { monitor_discarded(EVENT_LINK); - return mark_as_discarded(syscall); + // do not pop, we want to invalidate the inode even if the syscall is discarded + syscall->state = DISCARDED; } return 0; @@ -124,18 +126,16 @@ int __attribute__((always_inline)) sys_link_ret(void *ctx, int retval, int dr_ty return 0; } - int pass_to_userspace = !syscall->discarded && is_event_enabled(EVENT_LINK); - // invalidate user space inode, so no need to bump the discarder revision in the event if (retval >= 0) { // for hardlink we need to invalidate the discarders as the nlink counter in now > 1 expire_inode_discarders(syscall->link.src_file.path_key.mount_id, syscall->link.src_file.path_key.ino); } - if (pass_to_userspace) { + if (syscall->state != DISCARDED && is_event_enabled(EVENT_LINK)) { syscall->resolver.dentry = syscall->link.target_dentry; syscall->resolver.key = syscall->link.target_file.path_key; - syscall->resolver.discarder_type = 0; + syscall->resolver.discarder_event_type = 0; syscall->resolver.callback = select_dr_key(dr_type, DR_LINK_DST_CALLBACK_KPROBE_KEY, DR_LINK_DST_CALLBACK_TRACEPOINT_KEY); syscall->resolver.iteration = 0; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/mkdir.h b/pkg/security/ebpf/c/include/hooks/mkdir.h index 0047972df52df..c736528d7222f 100644 --- a/pkg/security/ebpf/c/include/hooks/mkdir.h +++ b/pkg/security/ebpf/c/include/hooks/mkdir.h @@ -8,6 +8,10 @@ #include "helpers/syscalls.h" long __attribute__((always_inline)) trace__sys_mkdir(u8 async, umode_t mode) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_MKDIR); struct syscall_cache_t syscall = { .type = EVENT_MKDIR, @@ -51,8 +55,8 @@ int hook_vfs_mkdir(ctx_t *ctx) { syscall->mkdir.file.path_key.mount_id = get_path_mount_id(syscall->mkdir.path); - if (filter_syscall(syscall, mkdir_approvers)) { - return discard_syscall(syscall); + if (approve_syscall(syscall, mkdir_approvers) == DISCARDED) { + pop_syscall(EVENT_MKDIR); } 
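Alongside the state machine, the hard-coded policy.mode != NO_FILTER ? EVENT_X : 0 expressions are replaced by dentry_resolver_discarder_event_type: a syscall that ended up ACCEPTED skips the inode-discarder lookups in the dentry resolver entirely, while anything else passes its event type through so the resolver may still discard it. A short Go rendering of that selection; the numeric event id is arbitrary and the struct is a toy:

package main

import "fmt"

type syscallState int

const (
	accepted syscallState = iota // cannot be discarded later
	approved                     // can still be discarded
	discarded
)

type syscall struct {
	state     syscallState
	eventType uint64 // e.g. a numeric id such as EVENT_MKDIR
}

// discarderEventType models dentry_resolver_discarder_event_type: the dentry
// resolver only runs inode-discarder lookups when a non-zero event type is
// passed, and an ACCEPTED syscall opts out of those lookups entirely.
func discarderEventType(s *syscall) uint64 {
	if s.state == accepted {
		return 0
	}
	return s.eventType
}

func main() {
	s := &syscall{state: approved, eventType: 11} // 11 is an arbitrary illustrative id
	fmt.Println(discarderEventType(s))            // 11: resolver may still discard it
	s.state = accepted
	fmt.Println(discarderEventType(s))            // 0: no discarder check
}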
return 0; @@ -64,7 +68,7 @@ int __attribute__((always_inline)) sys_mkdir_ret(void *ctx, int retval, int dr_t return 0; } if (IS_UNHANDLED_ERROR(retval)) { - discard_syscall(syscall); + pop_syscall(EVENT_MKDIR); return 0; } @@ -73,7 +77,7 @@ int __attribute__((always_inline)) sys_mkdir_ret(void *ctx, int retval, int dr_t syscall->resolver.key = syscall->mkdir.file.path_key; syscall->resolver.dentry = syscall->mkdir.dentry; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? EVENT_MKDIR : 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = select_dr_key(dr_type, DR_MKDIR_CALLBACK_KPROBE_KEY, DR_MKDIR_CALLBACK_TRACEPOINT_KEY); syscall->resolver.iteration = 0; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/mmap.h b/pkg/security/ebpf/c/include/hooks/mmap.h index 45392650a94ca..d15a25651e63d 100644 --- a/pkg/security/ebpf/c/include/hooks/mmap.h +++ b/pkg/security/ebpf/c/include/hooks/mmap.h @@ -9,6 +9,10 @@ HOOK_ENTRY("vm_mmap_pgoff") int hook_vm_mmap_pgoff(ctx_t *ctx) { + if (is_discarded_by_pid()) { + return 0; + } + u64 len = CTX_PARM3(ctx); u64 prot = CTX_PARM4(ctx); u64 flags = CTX_PARM5(ctx); @@ -47,13 +51,13 @@ int __attribute__((always_inline)) sys_mmap_ret(void *ctx, int retval, u64 addr) return 0; } - if (syscall->resolver.ret == DENTRY_DISCARDED) { - monitor_discarded(EVENT_MMAP); + if (approve_syscall(syscall, mmap_approvers) == DISCARDED) { return 0; } - if (filter_syscall(syscall, mmap_approvers)) { - return mark_as_discarded(syscall); + if (syscall->resolver.ret == DENTRY_DISCARDED) { + monitor_discarded(EVENT_MMAP); + return 0; } if (retval != -1) { @@ -101,9 +105,9 @@ int hook_security_mmap_file(ctx_t *ctx) { syscall->resolver.key = syscall->mmap.file.path_key; syscall->resolver.dentry = syscall->mmap.dentry; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? EVENT_MMAP : 0; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); resolve_dentry(ctx, DR_KPROBE_OR_FENTRY); diff --git a/pkg/security/ebpf/c/include/hooks/module.h b/pkg/security/ebpf/c/include/hooks/module.h index 905301cbeebf2..ab5bc5caea8ee 100644 --- a/pkg/security/ebpf/c/include/hooks/module.h +++ b/pkg/security/ebpf/c/include/hooks/module.h @@ -42,8 +42,8 @@ int __attribute__((always_inline)) trace_kernel_file(ctx_t *ctx, struct file *f, syscall->resolver.key = syscall->init_module.file.path_key; syscall->resolver.dentry = syscall->init_module.dentry; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? 
EVENT_INIT_MODULE : 0; syscall->resolver.iteration = 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = DR_NO_CALLBACK; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/mount.h b/pkg/security/ebpf/c/include/hooks/mount.h index 2a56d04b1a750..00247418d7a0f 100644 --- a/pkg/security/ebpf/c/include/hooks/mount.h +++ b/pkg/security/ebpf/c/include/hooks/mount.h @@ -171,7 +171,7 @@ void __attribute__((always_inline)) handle_new_mount(void *ctx, struct syscall_c syscall->resolver.key = syscall->mount.root_key; syscall->resolver.dentry = root_dentry; - syscall->resolver.discarder_type = 0; + syscall->resolver.discarder_event_type = 0; syscall->resolver.callback = select_dr_key(dr_type, DR_MOUNT_STAGE_ONE_CALLBACK_KPROBE_KEY, DR_MOUNT_STAGE_ONE_CALLBACK_TRACEPOINT_KEY); syscall->resolver.iteration = 0; syscall->resolver.ret = 0; @@ -189,7 +189,7 @@ int __attribute__((always_inline)) dr_mount_stage_one_callback(void *ctx, int dr syscall->resolver.key = syscall->mount.mountpoint_key; syscall->resolver.dentry = syscall->mount.mountpoint_dentry; - syscall->resolver.discarder_type = 0; + syscall->resolver.discarder_event_type = 0; syscall->resolver.callback = select_dr_key(dr_type, DR_MOUNT_STAGE_TWO_CALLBACK_KPROBE_KEY, DR_MOUNT_STAGE_TWO_CALLBACK_TRACEPOINT_KEY); syscall->resolver.iteration = 0; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/mprotect.h b/pkg/security/ebpf/c/include/hooks/mprotect.h index 0a6107c12c13b..7bfb5c657d54f 100644 --- a/pkg/security/ebpf/c/include/hooks/mprotect.h +++ b/pkg/security/ebpf/c/include/hooks/mprotect.h @@ -7,6 +7,10 @@ #include "helpers/syscalls.h" HOOK_SYSCALL_ENTRY0(mprotect) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_MPROTECT); struct syscall_cache_t syscall = { .type = EVENT_MPROTECT, @@ -42,8 +46,8 @@ int __attribute__((always_inline)) sys_mprotect_ret(void *ctx, int retval) { return 0; } - if (filter_syscall(syscall, mprotect_approvers)) { - return mark_as_discarded(syscall); + if (approve_syscall(syscall, mprotect_approvers) == DISCARDED) { + return 0; } struct mprotect_event_t event = { diff --git a/pkg/security/ebpf/c/include/hooks/network/router.h b/pkg/security/ebpf/c/include/hooks/network/router.h index 11f42d748c195..81978692e5113 100644 --- a/pkg/security/ebpf/c/include/hooks/network/router.h +++ b/pkg/security/ebpf/c/include/hooks/network/router.h @@ -44,13 +44,17 @@ __attribute__((always_inline)) int route_pkt(struct __sk_buff *skb, struct packe // TODO: l3 / l4 firewall // route DNS requests - if (pkt->l4_protocol == IPPROTO_UDP && pkt->translated_ns_flow.flow.dport == htons(53)) { - tail_call_to_classifier(skb, DNS_REQUEST); + if (is_event_enabled(EVENT_DNS)) { + if (pkt->l4_protocol == IPPROTO_UDP && pkt->translated_ns_flow.flow.dport == htons(53)) { + tail_call_to_classifier(skb, DNS_REQUEST); + } } // route IMDS requests - if (pkt->l4_protocol == IPPROTO_TCP && ((pkt->ns_flow.flow.saddr[0] & 0xFFFFFFFF) == get_imds_ip() || (pkt->ns_flow.flow.daddr[0] & 0xFFFFFFFF) == get_imds_ip())) { - tail_call_to_classifier(skb, IMDS_REQUEST); + if (is_event_enabled(EVENT_IMDS)) { + if (pkt->l4_protocol == IPPROTO_TCP && ((pkt->ns_flow.flow.saddr[0] & 0xFFFFFFFF) == get_imds_ip() || (pkt->ns_flow.flow.daddr[0] & 0xFFFFFFFF) == get_imds_ip())) { + tail_call_to_classifier(skb, IMDS_REQUEST); + } } return ACT_OK; diff --git a/pkg/security/ebpf/c/include/hooks/open.h 
b/pkg/security/ebpf/c/include/hooks/open.h index 3d48368979c93..d41d105fdbbf7 100644 --- a/pkg/security/ebpf/c/include/hooks/open.h +++ b/pkg/security/ebpf/c/include/hooks/open.h @@ -11,6 +11,10 @@ #include "helpers/syscalls.h" int __attribute__((always_inline)) trace__sys_openat2(const char *path, u8 async, int flags, umode_t mode, u64 pid_tgid) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_OPEN); struct syscall_cache_t syscall = { .type = EVENT_OPEN, @@ -85,9 +89,8 @@ int __attribute__((always_inline)) handle_open_event(struct syscall_cache_t *sys set_file_inode(dentry, &syscall->open.file, 0); - if (filter_syscall(syscall, open_approvers)) { - return mark_as_discarded(syscall); - } + // do not pop, we want to keep track of the mount ref counter later in the stack + approve_syscall(syscall, open_approvers); return 0; } @@ -112,9 +115,8 @@ int __attribute__((always_inline)) handle_truncate_path_dentry(struct path *path set_file_inode(dentry, &syscall->open.file, 0); - if (filter_syscall(syscall, open_approvers)) { - return mark_as_discarded(syscall); - } + // do not pop, we want to keep track of the mount ref counter later in the stack + approve_syscall(syscall, open_approvers); return 0; } @@ -237,14 +239,14 @@ int __attribute__((always_inline)) sys_open_ret(void *ctx, int retval, int dr_ty // increase mount ref inc_mount_ref(syscall->open.file.path_key.mount_id); - if (syscall->discarded) { + if (syscall->state == DISCARDED) { pop_syscall(EVENT_OPEN); return 0; } syscall->resolver.key = syscall->open.file.path_key; syscall->resolver.dentry = syscall->open.dentry; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? EVENT_OPEN : 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = select_dr_key(dr_type, DR_OPEN_CALLBACK_KPROBE_KEY, DR_OPEN_CALLBACK_TRACEPOINT_KEY); syscall->resolver.iteration = 0; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/rename.h b/pkg/security/ebpf/c/include/hooks/rename.h index 4cef2525162b7..3cd813cac99f2 100644 --- a/pkg/security/ebpf/c/include/hooks/rename.h +++ b/pkg/security/ebpf/c/include/hooks/rename.h @@ -90,14 +90,15 @@ int hook_vfs_rename(ctx_t *ctx) { } // always return after any invalidate_inode call - if (filter_syscall(syscall, rename_approvers)) { - return mark_as_discarded(syscall); + if (approve_syscall(syscall, rename_approvers) == DISCARDED) { + // do not pop, we want to invalidate the inode even if the syscall is discarded + return 0; } // the mount id of path_key is resolved by kprobe/mnt_want_write. It is already set by the time we reach this probe. 
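A recurring detail in link, open, and rename here: when the approvers return DISCARDED, the syscall cache entry is deliberately not popped, because the exit probe still has bookkeeping to do (open bumps the mount reference count, link and rename expire inode discarders) before the state decides whether anything is resolved and sent on; link and rename additionally gate that last step on is_event_enabled. A compact Go model of the open-style exit probe under those assumptions; the map-based reference counter is an illustrative stand-in:

package main

import "fmt"

type syscallState int

const (
	approved syscallState = iota
	discarded
)

type openSyscall struct {
	state   syscallState
	mountID uint32
}

var mountRefs = map[uint32]int{}

// sysOpenRet models the exit-probe shape used by open in this change: the
// mount reference count is updated for every completed open, and only then
// does the DISCARDED state decide whether the event goes any further.
func sysOpenRet(s *openSyscall) bool {
	mountRefs[s.mountID]++ // bookkeeping happens even for discarded syscalls

	if s.state == discarded {
		return false // entry would be popped, nothing sent to userspace
	}
	return true // would hand off to the dentry resolver and emit the event
}

func main() {
	dropped := &openSyscall{state: discarded, mountID: 42}
	kept := &openSyscall{state: approved, mountID: 42}
	fmt.Println(sysOpenRet(dropped), sysOpenRet(kept), mountRefs[42]) // false true 2
}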
syscall->resolver.dentry = syscall->rename.src_dentry; syscall->resolver.key = syscall->rename.src_file.path_key; - syscall->resolver.discarder_type = 0; + syscall->resolver.discarder_event_type = 0; syscall->resolver.callback = DR_NO_CALLBACK; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; @@ -128,8 +129,6 @@ int __attribute__((always_inline)) sys_rename_ret(void *ctx, int retval, int dr_ expire_inode_discarders(syscall->rename.target_file.path_key.mount_id, inode); } - int pass_to_userspace = !syscall->discarded && is_event_enabled(EVENT_RENAME); - // invalid discarder + path_id if (retval >= 0) { expire_inode_discarders(syscall->rename.target_file.path_key.mount_id, syscall->rename.target_file.path_key.ino); @@ -141,11 +140,11 @@ int __attribute__((always_inline)) sys_rename_ret(void *ctx, int retval, int dr_ } } - if (pass_to_userspace) { + if (syscall->state != DISCARDED && is_event_enabled(EVENT_RENAME)) { // for centos7, use src dentry for target resolution as the pointers have been swapped syscall->resolver.key = syscall->rename.target_file.path_key; syscall->resolver.dentry = syscall->rename.src_dentry; - syscall->resolver.discarder_type = 0; + syscall->resolver.discarder_event_type = 0; syscall->resolver.callback = select_dr_key(dr_type, DR_RENAME_CALLBACK_KPROBE_KEY, DR_RENAME_CALLBACK_TRACEPOINT_KEY); syscall->resolver.iteration = 0; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/rmdir.h b/pkg/security/ebpf/c/include/hooks/rmdir.h index 71fc46ec73f05..f8b0b75a09f82 100644 --- a/pkg/security/ebpf/c/include/hooks/rmdir.h +++ b/pkg/security/ebpf/c/include/hooks/rmdir.h @@ -58,8 +58,11 @@ int hook_security_inode_rmdir(ctx_t *ctx) { key = syscall->rmdir.file.path_key; syscall->rmdir.dentry = dentry; - if (filter_syscall(syscall, rmdir_approvers)) { - return mark_as_discarded(syscall); + syscall->policy = fetch_policy(EVENT_RMDIR); + + if (approve_syscall(syscall, rmdir_approvers) == DISCARDED) { + // do not pop, we want to invalidate the inode even if the syscall is discarded + return 0; } break; @@ -77,9 +80,13 @@ int hook_security_inode_rmdir(ctx_t *ctx) { key = syscall->unlink.file.path_key; syscall->unlink.dentry = dentry; + + // fake rmdir event as we will generate an rmdir event at the end syscall->policy = fetch_policy(EVENT_RMDIR); - if (filter_syscall(syscall, rmdir_approvers)) { - return mark_as_discarded(syscall); + + if (approve_syscall(syscall, rmdir_approvers) == DISCARDED) { + // do not pop, we want to invalidate the inode even if the syscall is discarded + return 0; } break; @@ -90,7 +97,7 @@ int hook_security_inode_rmdir(ctx_t *ctx) { if (dentry != NULL) { syscall->resolver.key = key; syscall->resolver.dentry = dentry; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ?
syscall->type : 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = DR_SECURITY_INODE_RMDIR_CALLBACK_KPROBE_KEY; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; @@ -111,8 +118,9 @@ int tail_call_target_dr_security_inode_rmdir_callback(ctx_t *ctx) { } if (syscall->resolver.ret == DENTRY_DISCARDED) { - monitor_discarded(EVENT_RMDIR); - return mark_as_discarded(syscall); + monitor_discarded(syscall->type); + // do not pop, we want to invalidate the inode even if the syscall is discarded + syscall->state = DISCARDED; } return 0; } @@ -127,8 +135,7 @@ int __attribute__((always_inline)) sys_rmdir_ret(void *ctx, int retval) { return 0; } - int pass_to_userspace = !syscall->discarded && is_event_enabled(EVENT_RMDIR); - if (pass_to_userspace) { + if (syscall->state != DISCARDED && is_event_enabled(EVENT_RMDIR)) { struct rmdir_event_t event = { .syscall.retval = retval, .event.flags = syscall->async ? EVENT_FLAGS_ASYNC : 0, diff --git a/pkg/security/ebpf/c/include/hooks/selinux.h b/pkg/security/ebpf/c/include/hooks/selinux.h index aa9d7084823b9..f6efc208eb1bb 100644 --- a/pkg/security/ebpf/c/include/hooks/selinux.h +++ b/pkg/security/ebpf/c/include/hooks/selinux.h @@ -7,6 +7,10 @@ #include "helpers/syscalls.h" int __attribute__((always_inline)) handle_selinux_event(void *ctx, struct file *file, const char *buf, size_t count, enum selinux_source_event_t source_event) { + if (is_discarded_by_pid()) { + return 0; + } + struct syscall_cache_t syscall = { .type = EVENT_SELINUX, .policy = fetch_policy(EVENT_SELINUX), @@ -55,7 +59,7 @@ int __attribute__((always_inline)) handle_selinux_event(void *ctx, struct file * syscall.resolver.key = syscall.selinux.file.path_key; syscall.resolver.dentry = syscall.selinux.dentry; - syscall.resolver.discarder_type = syscall.policy.mode != NO_FILTER ? EVENT_SELINUX : 0; + syscall.resolver.discarder_event_type = dentry_resolver_discarder_event_type(&syscall); syscall.resolver.callback = DR_SELINUX_CALLBACK_KPROBE_KEY; syscall.resolver.iteration = 0; syscall.resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/setattr.h b/pkg/security/ebpf/c/include/hooks/setattr.h index d271283fee02d..4a4b3729283e6 100644 --- a/pkg/security/ebpf/c/include/hooks/setattr.h +++ b/pkg/security/ebpf/c/include/hooks/setattr.h @@ -63,20 +63,23 @@ int hook_security_inode_setattr(ctx_t *ctx) { u64 event_type = 0; switch (syscall->type) { case EVENT_UTIME: - if (filter_syscall(syscall, utime_approvers)) { - return discard_syscall(syscall); + if (approve_syscall(syscall, utime_approvers) == DISCARDED) { + pop_syscall(EVENT_UTIME); + return 0; } event_type = EVENT_UTIME; break; case EVENT_CHMOD: - if (filter_syscall(syscall, chmod_approvers)) { - return discard_syscall(syscall); + if (approve_syscall(syscall, chmod_approvers) == DISCARDED) { + pop_syscall(EVENT_CHMOD); + return 0; } event_type = EVENT_CHMOD; break; case EVENT_CHOWN: - if (filter_syscall(syscall, chown_approvers)) { - return discard_syscall(syscall); + if (approve_syscall(syscall, chown_approvers) == DISCARDED) { + pop_syscall(EVENT_CHOWN); + return 0; } event_type = EVENT_CHOWN; break; @@ -84,7 +87,7 @@ int hook_security_inode_setattr(ctx_t *ctx) { syscall->resolver.dentry = syscall->setattr.dentry; syscall->resolver.key = syscall->setattr.file.path_key; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? 
event_type : 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = DR_SETATTR_CALLBACK_KPROBE_KEY; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; @@ -106,7 +109,7 @@ int tail_call_target_dr_setattr_callback(ctx_t *ctx) { if (syscall->resolver.ret == DENTRY_DISCARDED) { monitor_discarded(syscall->type); - return discard_syscall(syscall); + pop_syscall(syscall->resolver.discarder_event_type); } return 0; diff --git a/pkg/security/ebpf/c/include/hooks/setxattr.h b/pkg/security/ebpf/c/include/hooks/setxattr.h index f97a33e160f3b..1590a5507199e 100644 --- a/pkg/security/ebpf/c/include/hooks/setxattr.h +++ b/pkg/security/ebpf/c/include/hooks/setxattr.h @@ -7,6 +7,10 @@ #include "helpers/syscalls.h" int __attribute__((always_inline)) trace__sys_setxattr(const char *xattr_name) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_SETXATTR); struct syscall_cache_t syscall = { .type = EVENT_SETXATTR, @@ -84,7 +88,7 @@ int __attribute__((always_inline)) trace__vfs_setxattr(ctx_t *ctx, u64 event_typ // the mount id of path_key is resolved by kprobe/mnt_want_write. It is already set by the time we reach this probe. syscall->resolver.dentry = syscall->xattr.dentry; syscall->resolver.key = syscall->xattr.file.path_key; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? event_type : 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = DR_SETXATTR_CALLBACK_KPROBE_KEY; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; @@ -106,7 +110,7 @@ int tail_call_target_dr_setxattr_callback(ctx_t *ctx) { if (syscall->resolver.ret == DENTRY_DISCARDED) { monitor_discarded(EVENT_SETXATTR); - return discard_syscall(syscall); + pop_syscall(EVENT_SETXATTR); } return 0; diff --git a/pkg/security/ebpf/c/include/hooks/signal.h b/pkg/security/ebpf/c/include/hooks/signal.h index 8b6d11fb80206..92dbd145d7992 100644 --- a/pkg/security/ebpf/c/include/hooks/signal.h +++ b/pkg/security/ebpf/c/include/hooks/signal.h @@ -6,6 +6,10 @@ #include "helpers/syscalls.h" HOOK_SYSCALL_ENTRY2(kill, int, pid, int, type) { + if (is_discarded_by_pid()) { + return 0; + } + /* TODO: implement the event for pid equal to 0 or -1. */ if (pid < 1) { return 0; diff --git a/pkg/security/ebpf/c/include/hooks/splice.h b/pkg/security/ebpf/c/include/hooks/splice.h index 7dff0490400e7..4d289c94ab615 100644 --- a/pkg/security/ebpf/c/include/hooks/splice.h +++ b/pkg/security/ebpf/c/include/hooks/splice.h @@ -9,6 +9,10 @@ #include "helpers/syscalls.h" HOOK_SYSCALL_ENTRY0(splice) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_SPLICE); struct syscall_cache_t syscall = { .type = EVENT_SPLICE, @@ -50,9 +54,9 @@ int rethook_get_pipe_info(ctx_t *ctx) { syscall->splice.file_found = 1; syscall->resolver.key = syscall->splice.file.path_key; syscall->resolver.dentry = syscall->splice.dentry; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? 
EVENT_SPLICE : 0; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); resolve_dentry(ctx, DR_KPROBE_OR_FENTRY); @@ -87,8 +91,8 @@ int __attribute__((always_inline)) sys_splice_ret(void *ctx, int retval) { syscall->splice.pipe_exit_flag = get_pipe_last_buffer_flags(syscall->splice.pipe_info, syscall->splice.bufs); } - if (filter_syscall(syscall, splice_approvers)) { - return mark_as_discarded(syscall); + if (approve_syscall(syscall, splice_approvers) == DISCARDED) { + return 0; } struct splice_event_t event = { diff --git a/pkg/security/ebpf/c/include/hooks/unlink.h b/pkg/security/ebpf/c/include/hooks/unlink.h index 55f97e367494a..aefe174c7465a 100644 --- a/pkg/security/ebpf/c/include/hooks/unlink.h +++ b/pkg/security/ebpf/c/include/hooks/unlink.h @@ -69,14 +69,15 @@ int hook_vfs_unlink(ctx_t *ctx) { set_file_inode(dentry, &syscall->unlink.file, 1); fill_file(dentry, &syscall->unlink.file); - if (filter_syscall(syscall, unlink_approvers)) { - return mark_as_discarded(syscall); + if (approve_syscall(syscall, unlink_approvers) == DISCARDED) { + // do not pop, we want to invalidate the inode even if the syscall is discarded + return 0; } // the mount id of path_key is resolved by kprobe/mnt_want_write. It is already set by the time we reach this probe. syscall->resolver.dentry = dentry; syscall->resolver.key = syscall->unlink.file.path_key; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? EVENT_UNLINK : 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = DR_UNLINK_CALLBACK_KPROBE_KEY; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; @@ -96,8 +97,10 @@ int tail_call_target_dr_unlink_callback(ctx_t *ctx) { return 0; } - if (syscall->resolver.ret < 0) { - return mark_as_discarded(syscall); + if (syscall->resolver.ret == DENTRY_DISCARDED) { + monitor_discarded(EVENT_UNLINK); + // do not pop, we want to invalidate the inode even if the syscall is discarded + syscall->state = DISCARDED; } return 0; @@ -114,10 +117,7 @@ int __attribute__((always_inline)) sys_unlink_ret(void *ctx, int retval) { } u64 enabled_events = get_enabled_events(); - int pass_to_userspace = !syscall->discarded && - (mask_has_event(enabled_events, EVENT_UNLINK) || - mask_has_event(enabled_events, EVENT_RMDIR)); - if (pass_to_userspace) { + if (syscall->state != DISCARDED && (mask_has_event(enabled_events, EVENT_UNLINK) || mask_has_event(enabled_events, EVENT_RMDIR))) { if (syscall->unlink.flags & AT_REMOVEDIR) { struct rmdir_event_t event = { .syscall.retval = retval, diff --git a/pkg/security/ebpf/c/include/hooks/utimes.h b/pkg/security/ebpf/c/include/hooks/utimes.h index 88bdc8d44c8ae..8cb865dc7f998 100644 --- a/pkg/security/ebpf/c/include/hooks/utimes.h +++ b/pkg/security/ebpf/c/include/hooks/utimes.h @@ -6,6 +6,10 @@ #include "helpers/syscalls.h" int __attribute__((always_inline)) trace__sys_utimes(const char *filename) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_UTIME); struct syscall_cache_t syscall = { .type = EVENT_UTIME, diff --git a/pkg/security/ebpf/c/include/maps.h b/pkg/security/ebpf/c/include/maps.h index 91cca71ffe8ec..c690d3e00a1ab 100644 --- a/pkg/security/ebpf/c/include/maps.h +++ b/pkg/security/ebpf/c/include/maps.h @@ -32,12 +32,14 @@ BPF_HASH_MAP(activity_dump_config_defaults, u32, struct activity_dump_config, 1) BPF_HASH_MAP(traced_cgroups, container_id_t, 
u64, 1) // max entries will be overridden at runtime BPF_HASH_MAP(cgroup_wait_list, container_id_t, u64, 1) // max entries will be overridden at runtime BPF_HASH_MAP(traced_pids, u32, u64, 8192) // max entries will be overridden at runtime -BPF_HASH_MAP(basename_approvers, struct basename_t, struct basename_filter_t, 255) +BPF_HASH_MAP(basename_approvers, struct basename_t, struct event_mask_filter_t, 255) BPF_HASH_MAP(register_netdevice_cache, u64, struct register_netdevice_cache_t, 1024) BPF_HASH_MAP(netdevice_lookup_cache, u64, struct device_ifindex_t, 1024) BPF_HASH_MAP(fd_link_pid, u8, u32, 1) BPF_HASH_MAP(security_profiles, container_id_t, struct security_profile_t, 1) // max entries will be overriden at runtime BPF_HASH_MAP(secprofs_syscalls, u64, struct security_profile_syscalls_t, 1) // max entries will be overriden at runtime +BPF_HASH_MAP(auid_approvers, u32, struct event_mask_filter_t, 128) +BPF_HASH_MAP(auid_range_approvers, u32, struct u32_range_filter_t, EVENT_MAX) BPF_LRU_MAP(activity_dump_rate_limiters, u64, struct activity_dump_rate_limiter_ctx, 1) // max entries will be overridden at runtime BPF_LRU_MAP(mount_ref, u32, struct mount_ref_t, 64000) @@ -63,6 +65,7 @@ BPF_LRU_MAP(syscall_monitor, struct syscall_monitor_key_t, struct syscall_monito BPF_LRU_MAP(syscall_table, struct syscall_table_key_t, u8, 50) BPF_LRU_MAP(kill_list, u32, u32, 32) BPF_LRU_MAP(user_sessions, struct user_session_key_t, struct user_session_t, 1024) +BPF_LRU_MAP(dentry_resolver_inputs, u64, struct dentry_resolver_input_t, 256) BPF_LRU_MAP_FLAGS(tasks_in_coredump, u64, u8, 64, BPF_F_NO_COMMON_LRU) BPF_LRU_MAP_FLAGS(syscalls, u64, struct syscall_cache_t, 1, BPF_F_NO_COMMON_LRU) // max entries will be overridden at runtime @@ -89,7 +92,6 @@ BPF_PERCPU_ARRAY_MAP(syscalls_stats, struct syscalls_stats_t, EVENT_MAX) BPF_PROG_ARRAY(args_envs_progs, 3) BPF_PROG_ARRAY(dentry_resolver_kprobe_or_fentry_callbacks, EVENT_MAX) -BPF_LRU_MAP(dentry_resolver_inputs, u64, struct dentry_resolver_input_t, 256) BPF_PROG_ARRAY(dentry_resolver_tracepoint_callbacks, EVENT_MAX) BPF_PROG_ARRAY(dentry_resolver_kprobe_or_fentry_progs, 6) BPF_PROG_ARRAY(dentry_resolver_tracepoint_progs, 3) diff --git a/pkg/security/ebpf/c/include/structs/dentry_resolver.h b/pkg/security/ebpf/c/include/structs/dentry_resolver.h index d60012aa0e38d..36edc15a7ed38 100644 --- a/pkg/security/ebpf/c/include/structs/dentry_resolver.h +++ b/pkg/security/ebpf/c/include/structs/dentry_resolver.h @@ -33,8 +33,14 @@ struct dentry_resolver_input_t { struct path_key_t key; struct path_key_t original_key; struct dentry *dentry; - u64 discarder_type; - s64 sysretval; + u64 discarder_event_type; + union { + s64 sysretval; + struct { + u32 cgroup_write_pid; + u32 cgroup_flags; + } cgroup_write_ctx; + }; int callback; int ret; int iteration; diff --git a/pkg/security/ebpf/c/include/structs/filter.h b/pkg/security/ebpf/c/include/structs/filter.h index 9d876d0a89ef9..78f99c0d19c00 100644 --- a/pkg/security/ebpf/c/include/structs/filter.h +++ b/pkg/security/ebpf/c/include/structs/filter.h @@ -14,13 +14,14 @@ struct policy_t { struct approver_stats_t { u64 event_approved_by_basename; u64 event_approved_by_flag; + u64 event_approved_by_auid; }; struct basename_t { char value[BASENAME_FILTER_SIZE]; }; -struct basename_filter_t { +struct event_mask_filter_t { u64 event_mask; }; @@ -34,6 +35,11 @@ struct u64_flags_filter_t { u8 is_set; }; +struct u32_range_filter_t { + u32 min; + u32 max; +}; + // Discarders struct discarder_stats_t { @@ -61,7 +67,7 @@ struct 
inode_discarder_t { }; struct is_discarded_by_inode_t { - u64 discarder_type; + u64 event_type; struct inode_discarder_t discarder; u64 now; }; diff --git a/pkg/security/ebpf/c/include/structs/syscalls.h b/pkg/security/ebpf/c/include/structs/syscalls.h index 3fe9851827624..7217ec8a54810 100644 --- a/pkg/security/ebpf/c/include/structs/syscalls.h +++ b/pkg/security/ebpf/c/include/structs/syscalls.h @@ -32,7 +32,7 @@ struct syscall_table_key_t { struct syscall_cache_t { struct policy_t policy; u64 type; - u8 discarded; + enum SYSCALL_STATE state; u8 async; u32 ctx_id; struct dentry_resolver_input_t resolver; diff --git a/pkg/security/ebpf/c/include/tests/discarders_test.h b/pkg/security/ebpf/c/include/tests/discarders_test.h index 074f179b8e7e7..6738e64eca392 100644 --- a/pkg/security/ebpf/c/include/tests/discarders_test.h +++ b/pkg/security/ebpf/c/include/tests/discarders_test.h @@ -6,7 +6,7 @@ int __attribute__((always_inline)) _is_discarded_by_inode(u64 event_type, u32 mount_id, u64 inode) { struct is_discarded_by_inode_t params = { - .discarder_type = event_type, + .event_type = event_type, .discarder = { .path_key.ino = inode, .path_key.mount_id = mount_id, diff --git a/pkg/security/ebpf/map.go b/pkg/security/ebpf/map.go index 99e39dff3dbf5..cb2fcf84ae9f7 100644 --- a/pkg/security/ebpf/map.go +++ b/pkg/security/ebpf/map.go @@ -126,6 +126,25 @@ func NewUint64FlagsMapItem(i uint64) *Uint64FlagsMapItem { return &item } +// UInt32RangeMapItem defines a uint32 range map item +type UInt32RangeMapItem struct { + Min uint32 + Max uint32 +} + +// MarshalBinary returns the binary representation of a UInt32RangeMapItem +func (i *UInt32RangeMapItem) MarshalBinary() ([]byte, error) { + b := make([]byte, 8) + binary.NativeEndian.PutUint32(b, i.Min) + binary.NativeEndian.PutUint32(b[4:], i.Max) + return b, nil +} + +// NewUInt32RangeMapItem returns a new UInt32RangeMapItem +func NewUInt32RangeMapItem(min, max uint32) *UInt32RangeMapItem { + return &UInt32RangeMapItem{Min: min, Max: max} +} + // Zero table items var ( ZeroUint8MapItem = BytesMapItem([]byte{0}) diff --git a/pkg/security/events/ad_limiter.go b/pkg/security/events/ad_limiter.go new file mode 100644 index 0000000000000..fdb9e70175ff6 --- /dev/null +++ b/pkg/security/events/ad_limiter.go @@ -0,0 +1,40 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
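For reference, the new auid_range_approvers map declared above is keyed per event type and stores struct u32_range_filter_t values, and the UInt32RangeMapItem helper added to pkg/security/ebpf/map.go produces the matching 8-byte payload. Below is a minimal standalone sketch (not part of the patch, with hypothetical auid bounds) showing that layout: Min in bytes 0..3 and Max in bytes 4..7, native endian, which is what the eBPF side reads back.

// Standalone sketch mirroring the UInt32RangeMapItem added in this patch; the auid bounds are
// hypothetical and only illustrate the value layout written into auid_range_approvers.
package main

import (
	"encoding/binary"
	"fmt"
)

type UInt32RangeMapItem struct {
	Min uint32
	Max uint32
}

// MarshalBinary lays out Min then Max, 4 bytes each, in native endianness,
// matching struct u32_range_filter_t { u32 min; u32 max; } on the kernel side.
func (i *UInt32RangeMapItem) MarshalBinary() ([]byte, error) {
	b := make([]byte, 8)
	binary.NativeEndian.PutUint32(b, i.Min)
	binary.NativeEndian.PutUint32(b[4:], i.Max)
	return b, nil
}

func main() {
	// approve audit UIDs 1000 through 2000 for some event type (the map key)
	item := &UInt32RangeMapItem{Min: 1000, Max: 2000}
	data, _ := item.MarshalBinary()
	fmt.Printf("% x\n", data) // e8 03 00 00 d0 07 00 00 on a little-endian host
}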
+ +// Package events holds events related files +package events + +import ( + "time" + + "github.com/DataDog/datadog-agent/pkg/security/utils" +) + +// AnomalyDetectionLimiter limiter specific to anomaly detection +type AnomalyDetectionLimiter struct { + limiter *utils.Limiter[string] +} + +// Allow returns whether the event is allowed +func (al *AnomalyDetectionLimiter) Allow(event Event) bool { + return al.limiter.Allow(event.GetWorkloadID()) +} + +// SwapStats return dropped and allowed stats +func (al *AnomalyDetectionLimiter) SwapStats() []utils.LimiterStat { + return al.limiter.SwapStats() +} + +// NewAnomalyDetectionLimiter returns a new rate limiter which is bucketed by workload ID +func NewAnomalyDetectionLimiter(numWorkloads int, numEventsAllowedPerPeriod int, period time.Duration) (*AnomalyDetectionLimiter, error) { + limiter, err := utils.NewLimiter[string](numWorkloads, numEventsAllowedPerPeriod, period) + if err != nil { + return nil, err + } + + return &AnomalyDetectionLimiter{ + limiter: limiter, + }, nil +} diff --git a/pkg/security/events/custom.go b/pkg/security/events/custom.go index 4125838c53277..289fac4268185 100644 --- a/pkg/security/events/custom.go +++ b/pkg/security/events/custom.go @@ -156,6 +156,11 @@ func (ce *CustomEvent) GetWorkloadID() string { return "" } +// GetFieldValue returns the field value +func (ce *CustomEvent) GetFieldValue(_ eval.Field) (interface{}, error) { + return "", eval.ErrFieldNotFound{} +} + // GetEventType returns the event type func (ce *CustomEvent) GetEventType() model.EventType { return ce.eventType diff --git a/pkg/security/events/event.go b/pkg/security/events/event.go index 8cf72317d83d5..ae863ac3d0777 100644 --- a/pkg/security/events/event.go +++ b/pkg/security/events/event.go @@ -11,6 +11,7 @@ package events import ( "encoding/json" + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) @@ -42,6 +43,7 @@ type Event interface { GetTags() []string GetType() string GetActionReports() []model.ActionReport + GetFieldValue(eval.Field) (interface{}, error) } // EventSender defines an event sender diff --git a/pkg/security/events/rate_limiter.go b/pkg/security/events/rate_limiter.go index 231b9e096ed1b..2c800f00565f1 100644 --- a/pkg/security/events/rate_limiter.go +++ b/pkg/security/events/rate_limiter.go @@ -11,10 +11,10 @@ import ( "sync" "time" + "github.com/DataDog/datadog-agent/pkg/security/seclog" "github.com/DataDog/datadog-agent/pkg/security/utils" "github.com/DataDog/datadog-go/v5/statsd" - "go.uber.org/atomic" "golang.org/x/time/rate" "github.com/DataDog/datadog-agent/pkg/security/config" @@ -24,11 +24,14 @@ import ( ) const ( - // Arbitrary default limit to prevent flooding. - defaultLimit = rate.Limit(10) + // Arbitrary default interval between two events to prevent flooding. + defaultEvery = 100 * time.Millisecond // Default Token bucket size. 40 is meant to handle sudden burst of events while making sure that we prevent // flooding. 
defaultBurst = 40 + + // maxUniqueToken maximum unique token for the token based rate limiter + maxUniqueToken = 500 ) var ( @@ -49,74 +52,6 @@ type Limiter interface { SwapStats() []utils.LimiterStat } -// StdLimiter describes an object that applies limits on -// the rate of triggering of a rule to ensure we don't overflow -// with too permissive rules -type StdLimiter struct { - rateLimiter *rate.Limiter - - // stats - dropped *atomic.Uint64 - allowed *atomic.Uint64 -} - -// NewStdLimiter returns a new rule limiter -func NewStdLimiter(limit rate.Limit, burst int) *StdLimiter { - return &StdLimiter{ - rateLimiter: rate.NewLimiter(limit, burst), - dropped: atomic.NewUint64(0), - allowed: atomic.NewUint64(0), - } -} - -// Allow returns whether the event is allowed -func (l *StdLimiter) Allow(_ Event) bool { - if l.rateLimiter.Allow() { - l.allowed.Inc() - return true - } - l.dropped.Inc() - - return false -} - -// SwapStats returns the dropped and allowed stats, and zeros the stats -func (l *StdLimiter) SwapStats() []utils.LimiterStat { - return []utils.LimiterStat{ - { - Dropped: l.dropped.Swap(0), - Allowed: l.allowed.Swap(0), - }, - } -} - -// AnomalyDetectionLimiter limiter specific to anomaly detection -type AnomalyDetectionLimiter struct { - limiter *utils.Limiter[string] -} - -// Allow returns whether the event is allowed -func (al *AnomalyDetectionLimiter) Allow(event Event) bool { - return al.limiter.Allow(event.GetWorkloadID()) -} - -// SwapStats return dropped and allowed stats -func (al *AnomalyDetectionLimiter) SwapStats() []utils.LimiterStat { - return al.limiter.SwapStats() -} - -// NewAnomalyDetectionLimiter returns a new rate limiter which is bucketed by workload ID -func NewAnomalyDetectionLimiter(numWorkloads int, numEventsAllowedPerPeriod int, period time.Duration) (*AnomalyDetectionLimiter, error) { - limiter, err := utils.NewLimiter[string](numWorkloads, numEventsAllowedPerPeriod, period) - if err != nil { - return nil, err - } - - return &AnomalyDetectionLimiter{ - limiter: limiter, - }, nil -} - // RateLimiter describes a set of rule rate limiters type RateLimiter struct { sync.RWMutex @@ -158,17 +93,28 @@ func (rl *RateLimiter) Apply(ruleSet *rules.RuleSet, customRuleIDs []eval.RuleID newLimiters := make(map[string]Limiter) for _, id := range customRuleIDs { - newLimiters[id] = NewStdLimiter(defaultLimit, defaultBurst) + newLimiters[id] = NewStdLimiter(rate.Every(defaultEvery), defaultBurst) } // override if there is more specific defs rl.applyBaseLimitersFromDefault(newLimiters) + var err error for id, rule := range ruleSet.GetRules() { + every, burst := defaultEvery, defaultBurst + if rule.Def.Every != 0 { - newLimiters[id] = NewStdLimiter(rate.Every(rule.Def.Every), 1) + every, burst = rule.Def.Every, 1 + } + + if len(rule.Def.RateLimiterToken) > 0 { + newLimiters[id], err = NewTokenLimiter(maxUniqueToken, burst, every, rule.Def.RateLimiterToken) + if err != nil { + seclog.Errorf("unable to use the token based rate limiter, fallback to the standard one: %s", err) + newLimiters[id] = NewStdLimiter(rate.Every(time.Duration(every)), burst) + } } else { - newLimiters[id] = NewStdLimiter(defaultLimit, defaultBurst) + newLimiters[id] = NewStdLimiter(rate.Every(time.Duration(every)), burst) } } diff --git a/pkg/security/events/std_limiter.go b/pkg/security/events/std_limiter.go new file mode 100644 index 0000000000000..f7eba7f146463 --- /dev/null +++ b/pkg/security/events/std_limiter.go @@ -0,0 +1,55 @@ +// Unless explicitly stated otherwise all files in this repository 
are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package events holds events related files +package events + +import ( + "go.uber.org/atomic" + "golang.org/x/time/rate" + + "github.com/DataDog/datadog-agent/pkg/security/utils" +) + +// StdLimiter describes an object that applies limits on +// the rate of triggering of a rule to ensure we don't overflow +// with too permissive rules +type StdLimiter struct { + rateLimiter *rate.Limiter + + // stats + dropped *atomic.Uint64 + allowed *atomic.Uint64 +} + +// NewStdLimiter returns a new rule limiter +func NewStdLimiter(limit rate.Limit, burst int) *StdLimiter { + return &StdLimiter{ + rateLimiter: rate.NewLimiter(limit, burst), + dropped: atomic.NewUint64(0), + allowed: atomic.NewUint64(0), + } +} + +// Allow returns whether the event is allowed +func (l *StdLimiter) Allow(_ Event) bool { + if l.rateLimiter.Allow() { + l.allowed.Inc() + return true + } + l.dropped.Inc() + + return false +} + +// SwapStats returns the dropped and allowed stats, and zeros the stats +func (l *StdLimiter) SwapStats() []utils.LimiterStat { + return []utils.LimiterStat{ + { + Dropped: l.dropped.Swap(0), + Allowed: l.allowed.Swap(0), + }, + } +} diff --git a/pkg/security/events/token_limiter.go b/pkg/security/events/token_limiter.go new file mode 100644 index 0000000000000..bcfe9e7c97c91 --- /dev/null +++ b/pkg/security/events/token_limiter.go @@ -0,0 +1,79 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
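To summarize the rate-limiter rework above: the flat defaultLimit of 10 events per second becomes defaultEvery = 100ms (the same average rate expressed as an interval), and rules that set Def.RateLimiterToken now get a TokenLimiter bucketed by the listed field values, falling back to a StdLimiter when the fields cannot be resolved. A caller-side sketch follows; the "process.file.name" token field and the limit values are only illustrative choices, not something this patch mandates.

// Sketch of how RateLimiter.Apply wires the two limiter flavours after this change.
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"

	"github.com/DataDog/datadog-agent/pkg/security/events"
	"github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval"
)

func main() {
	// default: one token bucket per rule, refilled every 100ms (about 10 events/s) with a burst of 40
	std := events.NewStdLimiter(rate.Every(100*time.Millisecond), 40)
	_ = std

	// token-based: one bucket per unique "field:value" token, up to 500 tracked tokens,
	// so a single noisy value cannot exhaust the budget of every other one
	tok, err := events.NewTokenLimiter(500, 1, time.Minute, []eval.Field{"process.file.name"})
	if err != nil {
		// RateLimiter.Apply falls back to the standard limiter in this case
		fmt.Println("token limiter unavailable:", err)
		return
	}
	_ = tok
}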
+ +// Package events holds events related files +package events + +import ( + "fmt" + "time" + + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" + "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/utils" +) + +// TokenLimiter limiter that buckets events by the values of a set of event fields +type TokenLimiter struct { + getToken func(Event) string + limiter *utils.Limiter[string] +} + +// Allow returns whether the event is allowed +func (tkl *TokenLimiter) Allow(event Event) bool { + return tkl.limiter.Allow(tkl.getToken(event)) +} + +// SwapStats returns dropped and allowed stats +func (tkl *TokenLimiter) SwapStats() []utils.LimiterStat { + return tkl.limiter.SwapStats() +} + +func (tkl *TokenLimiter) genGetTokenFnc(fields []eval.Field) error { + var m model.Model + event := m.NewEvent() + + for _, field := range fields { + if _, err := event.GetFieldType(field); err != nil { + return err + } + } + + tkl.getToken = func(event Event) string { + var token string + for i, field := range fields { + value, err := event.GetFieldValue(field) + if err != nil { + return "" + } + + if i == 0 { + token = fmt.Sprintf("%s:%v", field, value) + } else { + token += fmt.Sprintf(";%s:%v", field, value) + } + } + return token + } + + return nil +} + +// NewTokenLimiter returns a new rate limiter which is bucketed by fields +func NewTokenLimiter(maxUniqueToken int, numEventsAllowedPerPeriod int, period time.Duration, fields []eval.Field) (*TokenLimiter, error) { + limiter, err := utils.NewLimiter[string](maxUniqueToken, numEventsAllowedPerPeriod, period) + if err != nil { + return nil, err + } + + tkl := &TokenLimiter{ + limiter: limiter, + } + if err := tkl.genGetTokenFnc(fields); err != nil { + return nil, err + } + + return tkl, nil +} diff --git a/pkg/security/generators/schemas/policy/main.go b/pkg/security/generators/schemas/policy/main.go new file mode 100644 index 0000000000000..0dc6181eabead --- /dev/null +++ b/pkg/security/generators/schemas/policy/main.go @@ -0,0 +1,70 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:generate go run github.com/DataDog/datadog-agent/pkg/security/generators/schemas/policy -output ../../../tests/schemas/policy.schema.json + +// Package main holds main related files +package main + +import ( + "encoding/json" + "flag" + "os" + "reflect" + "time" + + "github.com/invopop/jsonschema" + + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" +) + +func main() { + var output string + flag.StringVar(&output, "output", "", "output file") + flag.Parse() + + if output == "" { + panic("an output file argument is required") + } + + reflector := jsonschema.Reflector{ + ExpandedStruct: true, + Mapper: func(t reflect.Type) *jsonschema.Schema { + switch t { + case reflect.TypeOf(time.Duration(0)): + return &jsonschema.Schema{ + OneOf: []*jsonschema.Schema{ + { + Type: "string", + Format: "duration", + Description: "Duration in Go format (e.g.
1h30m, see https://pkg.go.dev/time#ParseDuration)", + }, + { + Type: "integer", + Description: "Duration in nanoseconds", + }, + }, + } + } + return nil + }, + } + + if err := reflector.AddGoComments("github.com/DataDog/datadog-agent/pkg/security/secl/rules/model.go", "../../../secl/rules"); err != nil { + panic(err) + } + + schema := reflector.Reflect(&rules.PolicyDef{}) + schema.ID = "https://github.com/DataDog/datadog-agent/tree/main/pkg/security/secl/rules" + + data, err := json.MarshalIndent(schema, "", " ") + if err != nil { + panic(err) + } + + if err := os.WriteFile(output, data, 0644); err != nil { + panic(err) + } +} diff --git a/pkg/security/metrics/metrics.go b/pkg/security/metrics/metrics.go index 9213c0c4ff739..8bbc1b5546dba 100644 --- a/pkg/security/metrics/metrics.go +++ b/pkg/security/metrics/metrics.go @@ -39,6 +39,12 @@ var ( // Tags: rule_id MetricRulesSuppressed = newRuntimeMetric(".rules.suppressed") + // Rule action metrics + + // MetricRuleActionPerformed is the name of the metric used to count actions performed after a rule was matched + // Tags: rule_id, action_name + MetricRuleActionPerformed = newRuntimeMetric(".rules.action_performed") + // Syscall monitoring metrics // MetricSyscalls is the name of the metric used to count each syscall executed on the host @@ -331,6 +337,18 @@ var ( // Tags: - MetricRulesStatus = newRuntimeMetric(".rules_status") + // Enforcement metrics + + // MetricEnforcementProcessKilled is the name of the metric used to report the number of processes killed + // Tags: rule_id + MetricEnforcementProcessKilled = newRuntimeMetric(".enforcement.process_killed") + // MetricEnforcementRuleDisarmed is the name of the metric used to report that a rule was disarmed + // Tags: rule_id, disarmer_type ('executable', 'container') + MetricEnforcementRuleDisarmed = newRuntimeMetric(".enforcement.rule_disarmed") + // MetricEnforcementRuleRearmed is the name of the metric used to report that a rule was rearmed + // Tags: rule_id + MetricEnforcementRuleRearmed = newRuntimeMetric(".enforcement.rule_rearmed") + // Others // MetricSelfTest is the name of the metric used to report that a self test was performed diff --git a/pkg/security/module/cws.go b/pkg/security/module/cws.go index 4f2c298989e75..1ec45284e524d 100644 --- a/pkg/security/module/cws.go +++ b/pkg/security/module/cws.go @@ -61,8 +61,6 @@ func NewCWSConsumer(evm *eventmonitor.EventMonitor, cfg *config.RuntimeSecurityC return nil, err } - ctx, cancelFnc := context.WithCancel(context.Background()) - var selfTester *selftests.SelfTester if cfg.SelfTestEnabled { selfTester, err = selftests.NewSelfTester(cfg, evm.Probe) @@ -73,6 +71,13 @@ func NewCWSConsumer(evm *eventmonitor.EventMonitor, cfg *config.RuntimeSecurityC family, address := config.GetFamilyAddress(cfg.SocketPath) + apiServer, err := NewAPIServer(cfg, evm.Probe, opts.MsgSender, evm.StatsdClient, selfTester) + if err != nil { + return nil, err + } + + ctx, cancelFnc := context.WithCancel(context.Background()) + c := &CWSConsumer{ config: cfg, probe: evm.Probe, @@ -80,7 +85,7 @@ func NewCWSConsumer(evm *eventmonitor.EventMonitor, cfg *config.RuntimeSecurityC // internals ctx: ctx, cancelFnc: cancelFnc, - apiServer: NewAPIServer(cfg, evm.Probe, opts.MsgSender, evm.StatsdClient, selfTester), + apiServer: apiServer, rateLimiter: events.NewRateLimiter(cfg, evm.StatsdClient), sendStatsChan: make(chan chan bool, 1), grpcServer: NewGRPCServer(family, address), @@ -93,7 +98,7 @@ func NewCWSConsumer(evm *eventmonitor.EventMonitor, cfg 
*config.RuntimeSecurityC if opts.EventSender != nil { c.eventSender = opts.EventSender } else { - c.eventSender = c + c.eventSender = c.APIServer() } seclog.Infof("Instantiating CWS rule engine") @@ -103,7 +108,7 @@ func NewCWSConsumer(evm *eventmonitor.EventMonitor, cfg *config.RuntimeSecurityC listeners = append(listeners, selfTester) } - c.ruleEngine, err = rulesmodule.NewRuleEngine(evm, cfg, evm.Probe, c.rateLimiter, c.apiServer, c.eventSender, c.statsdClient, listeners...) + c.ruleEngine, err = rulesmodule.NewRuleEngine(evm, cfg, evm.Probe, c.rateLimiter, c.apiServer, c, c.statsdClient, listeners...) if err != nil { return nil, err } @@ -166,7 +171,7 @@ func (c *CWSConsumer) Start() error { // we can now wait for self test events cb := func(success []eval.RuleID, fails []eval.RuleID, testEvents map[eval.RuleID]*serializers.EventSerializer) { if c.config.SelfTestSendReport { - ReportSelfTest(c.eventSender, c.statsdClient, success, fails, testEvents) + c.reportSelfTest(success, fails, testEvents) } seclog.Debugf("self-test results : success : %v, failed : %v", success, fails) @@ -216,20 +221,19 @@ func (c *CWSConsumer) RunSelfTest(gRPC bool) (bool, error) { return true, nil } -// ReportSelfTest reports to Datadog that a self test was performed -func ReportSelfTest(sender events.EventSender, statsdClient statsd.ClientInterface, success []eval.RuleID, fails []eval.RuleID, testEvents map[eval.RuleID]*serializers.EventSerializer) { +func (c *CWSConsumer) reportSelfTest(success []eval.RuleID, fails []eval.RuleID, testEvents map[eval.RuleID]*serializers.EventSerializer) { // send metric with number of success and fails tags := []string{ fmt.Sprintf("success:%d", len(success)), fmt.Sprintf("fails:%d", len(fails)), } - if err := statsdClient.Count(metrics.MetricSelfTest, 1, tags, 1.0); err != nil { + if err := c.statsdClient.Count(metrics.MetricSelfTest, 1, tags, 1.0); err != nil { seclog.Errorf("failed to send self_test metric: %s", err) } // send the custom event with the list of succeed and failed self tests rule, event := selftests.NewSelfTestEvent(success, fails, testEvents) - sender.SendEvent(rule, event, nil, "") + c.SendEvent(rule, event, nil, "") } // Stop closes the module @@ -250,13 +254,14 @@ func (c *CWSConsumer) Stop() { // HandleCustomEvent is called by the probe when an event should be sent to Datadog but doesn't need evaluation func (c *CWSConsumer) HandleCustomEvent(rule *rules.Rule, event *events.CustomEvent) { - c.eventSender.SendEvent(rule, event, nil, "") + c.SendEvent(rule, event, nil, "") } // SendEvent sends an event to the backend after checking that the rate limiter allows it for the provided rule +// Implements the EventSender interface func (c *CWSConsumer) SendEvent(rule *rules.Rule, event events.Event, extTagsCb func() []string, service string) { if c.rateLimiter.Allow(rule.ID, event) { - c.apiServer.SendEvent(rule, event, extTagsCb, service) + c.eventSender.SendEvent(rule, event, extTagsCb, service) } else { seclog.Tracef("Event on rule %s was dropped due to rate limiting", rule.ID) } @@ -320,3 +325,11 @@ func (c *CWSConsumer) statsSender() { func (c *CWSConsumer) GetRuleEngine() *rulesmodule.RuleEngine { return c.ruleEngine } + +// PrepareForFunctionalTests tweaks the module to be ready for functional tests +// currently it: +// - disables the container running telemetry +func (c *CWSConsumer) PrepareForFunctionalTests() { + // no need for container running telemetry in functional tests + c.crtelemetry = nil +} diff --git a/pkg/security/module/ecs_tags.go 
b/pkg/security/module/ecs_tags.go new file mode 100644 index 0000000000000..6d9db97c81c7e --- /dev/null +++ b/pkg/security/module/ecs_tags.go @@ -0,0 +1,38 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build docker + +// Package module holds module related files +package module + +import ( + "context" + "time" + + ecsmeta "github.com/DataDog/datadog-agent/pkg/util/ecs/metadata" +) + +func getCurrentECSTaskTags() (map[string]string, error) { + client, err := ecsmeta.V3orV4FromCurrentTask() + if err != nil { + return nil, err + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) + defer cancel() + + task, err := client.GetTask(ctx) + if err != nil { + return nil, err + } + + return map[string]string{ + "task_name": task.Family, + "task_family": task.Family, + "task_arn": task.TaskARN, + "task_version": task.Version, + }, nil +} diff --git a/pkg/security/module/msg_sender.go b/pkg/security/module/msg_sender.go index b19499faf47a0..eddd3ebf67f0d 100644 --- a/pkg/security/module/msg_sender.go +++ b/pkg/security/module/msg_sender.go @@ -9,7 +9,7 @@ package module import ( "fmt" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/common" "github.com/DataDog/datadog-agent/pkg/security/proto/api" "github.com/DataDog/datadog-agent/pkg/security/reporter" @@ -73,7 +73,7 @@ func (ds *DirectMsgSender) Send(msg *api.SecurityEventMessage, _ func(*api.Secur // NewDirectMsgSender returns a new direct sender func NewDirectMsgSender(stopper startstop.Stopper) (*DirectMsgSender, error) { - useSecRuntimeTrack := pkgconfig.SystemProbe().GetBool("runtime_security_config.use_secruntime_track") + useSecRuntimeTrack := pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.use_secruntime_track") endpoints, destinationsCtx, err := common.NewLogContextRuntime(useSecRuntimeTrack) if err != nil { diff --git a/pkg/config/consts.go b/pkg/security/module/noecs_tags.go similarity index 59% rename from pkg/config/consts.go rename to pkg/security/module/noecs_tags.go index 29200ba9132b4..93e2a2467d4b1 100644 --- a/pkg/config/consts.go +++ b/pkg/security/module/noecs_tags.go @@ -3,9 +3,11 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
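When the agent runs on ECS (see the env.IsECS / env.IsECSFargate check added to NewAPIServer further down), the task tags returned by getCurrentECSTaskTags are fetched once and later merged into every outgoing event by updateMsgTags, skipping any key the event already carries. A toy sketch of that merge, with hypothetical task values:

// Toy sketch (not in the patch) of the tag merge performed by updateMsgTags for ECS task tags:
// a tag is only appended when no tag with the same "key:" prefix is present yet.
package main

import (
	"fmt"
	"slices"
	"strings"
)

func mergeECSTags(tags []string, ecsTags map[string]string) []string {
	for key, value := range ecsTags {
		present := slices.ContainsFunc(tags, func(tag string) bool {
			return strings.HasPrefix(tag, key+":")
		})
		if !present {
			tags = append(tags, key+":"+value)
		}
	}
	return tags
}

func main() {
	ecs := map[string]string{
		"task_name":    "billing",
		"task_family":  "billing",
		"task_arn":     "arn:aws:ecs:us-east-1:123456789012:task/abc",
		"task_version": "42",
	}
	// "task_family" is already set here, so only the other three keys are appended
	fmt.Println(mergeECSTags([]string{"task_family:billing"}, ecs))
}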
-package config +//go:build !docker -const ( - // ClusterIDCacheKey is the key name for the orchestrator cluster id in the agent in-mem cache - ClusterIDCacheKey = "orchestratorClusterID" -) +// Package module holds module related files +package module + +func getCurrentECSTaskTags() (map[string]string, error) { + return nil, nil +} diff --git a/pkg/security/module/server.go b/pkg/security/module/server.go index d07dd25e5e871..61ebecea1bbe0 100644 --- a/pkg/security/module/server.go +++ b/pkg/security/module/server.go @@ -21,7 +21,8 @@ import ( "github.com/mailru/easyjson" "go.uber.org/atomic" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/common" "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/events" @@ -32,7 +33,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/probe/selftests" "github.com/DataDog/datadog-agent/pkg/security/proto/api" "github.com/DataDog/datadog-agent/pkg/security/rules/monitor" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/security/seclog" @@ -44,48 +44,65 @@ import ( ) const ( - maxRetry = 3 + maxRetry = 10 retryDelay = time.Second ) type pendingMsg struct { - ruleID string - backendEvent events.BackendEvent - eventJSON []byte - tags []string - actionReports []model.ActionReport - service string - extTagsCb func() []string - sendAfter time.Time - retry int + ruleID string + backendEvent events.BackendEvent + eventSerializer *serializers.EventSerializer + tags []string + actionReports []model.ActionReport + service string + extTagsCb func() []string + sendAfter time.Time + retry int } -func (p *pendingMsg) ToJSON() ([]byte, bool, error) { - fullyResolved := true +func (p *pendingMsg) isResolved() bool { + for _, report := range p.actionReports { + if !report.IsResolved() { + return false + } + } + return true +} +func (p *pendingMsg) toJSON() ([]byte, error) { p.backendEvent.RuleActions = []json.RawMessage{} for _, report := range p.actionReports { - data, resolved, err := report.ToJSON() + if patcher, ok := report.(serializers.EventSerializerPatcher); ok { + patcher.PatchEvent(p.eventSerializer) + } + + data, err := report.ToJSON() if err != nil { - return nil, false, err + return nil, err } - p.backendEvent.RuleActions = append(p.backendEvent.RuleActions, data) - if !resolved { - fullyResolved = false + if len(data) > 0 { + p.backendEvent.RuleActions = append(p.backendEvent.RuleActions, data) } } backendEventJSON, err := easyjson.Marshal(p.backendEvent) if err != nil { - return nil, false, err + return nil, err } - data := append(backendEventJSON[:len(backendEventJSON)-1], ',') - data = append(data, p.eventJSON[1:]...) + eventJSON, err := p.eventSerializer.ToJSON() + if err != nil { + return nil, err + } - return data, fullyResolved, nil + return mergeJSON(backendEventJSON, eventJSON), nil +} + +func mergeJSON(j1, j2 []byte) []byte { + data := append(j1[:len(j1)-1], ',') + return append(data, j2[1:]...) 
} // APIServer represents a gRPC server in charge of receiving events sent by @@ -108,6 +125,7 @@ type APIServer struct { policiesStatusLock sync.RWMutex policiesStatus []*api.PolicyStatus msgSender MsgSender + ecsTags map[string]string stopChan chan struct{} stopper startstop.Stopper @@ -191,7 +209,7 @@ func (a *APIServer) dequeue(now time.Time, cb func(msg *pendingMsg) bool) { seclog.Errorf("failed to sent event, max retry reached: %d", msg.retry) return true } - seclog.Debugf("failed to sent event, retry %d/%d", msg.retry, maxRetry) + seclog.Tracef("failed to sent event, retry %d/%d", msg.retry, maxRetry) msg.sendAfter = now.Add(retryDelay) msg.retry++ @@ -200,6 +218,29 @@ func (a *APIServer) dequeue(now time.Time, cb func(msg *pendingMsg) bool) { }) } +func (a *APIServer) updateMsgTags(msg *api.SecurityEventMessage) { + // apply ecs tag if possible + if a.ecsTags != nil { + for key, value := range a.ecsTags { + if !slices.ContainsFunc(msg.Tags, func(tag string) bool { + return strings.HasPrefix(tag, key+":") + }) { + msg.Tags = append(msg.Tags, key+":"+value) + } + } + } + + // look for the service tag if we don't have one yet + if len(msg.Service) == 0 { + for _, tag := range msg.Tags { + if strings.HasPrefix(tag, "service:") { + msg.Service = strings.TrimPrefix(tag, "service:") + break + } + } + } +} + func (a *APIServer) start(ctx context.Context) { ticker := time.NewTicker(200 * time.Millisecond) defer ticker.Stop() @@ -217,29 +258,17 @@ func (a *APIServer) start(ctx context.Context) { } } - // recopy tags - hasService := len(msg.service) != 0 - for _, tag := range msg.tags { - // look for the service tag if we don't have one yet - if !hasService { - if strings.HasPrefix(tag, "service:") { - msg.service = strings.TrimPrefix(tag, "service:") - hasService = true - } - } + // not fully resolved, retry + if !msg.isResolved() && msg.retry < maxRetry { + return false } - data, resolved, err := msg.ToJSON() + data, err := msg.toJSON() if err != nil { seclog.Errorf("failed to marshal event context: %v", err) return true } - // not fully resolved, retry - if !resolved && msg.retry < maxRetry { - return false - } - seclog.Tracef("Sending event message for rule `%s` to security-agent `%s`", msg.ruleID, string(data)) m := &api.SecurityEventMessage{ @@ -248,6 +277,7 @@ func (a *APIServer) start(ctx context.Context) { Service: msg.service, Tags: msg.tags, } + a.updateMsgTags(m) a.msgSender.Send(m, a.expireEvent) @@ -278,7 +308,7 @@ func (a *APIServer) GetConfig(_ context.Context, _ *api.GetConfigParams) (*api.S } // SendEvent forwards events sent by the runtime security module to Datadog -func (a *APIServer) SendEvent(rule *rules.Rule, e events.Event, extTagsCb func() []string, service string) { +func (a *APIServer) SendEvent(rule *rules.Rule, event events.Event, extTagsCb func() []string, service string) { backendEvent := events.BackendEvent{ Title: rule.Def.Description, AgentContext: events.AgentContext{ @@ -296,13 +326,7 @@ func (a *APIServer) SendEvent(rule *rules.Rule, e events.Event, extTagsCb func() backendEvent.AgentContext.PolicyVersion = policy.Def.Version } - eventJSON, err := marshalEvent(e, rule.Opts) - if err != nil { - seclog.Errorf("failed to marshal event: %v", err) - return - } - - seclog.Tracef("Prepare event message for rule `%s` : `%s`", rule.ID, string(eventJSON)) + seclog.Tracef("Prepare event message for rule `%s`", rule.ID) // no retention if there is no ext tags to resolve retention := a.retention @@ -310,51 +334,79 @@ func (a *APIServer) SendEvent(rule *rules.Rule, e 
events.Event, extTagsCb func() retention = 0 } - // get type tags + container tags if already resolved, see ResolveContainerTags - eventTags := e.GetTags() - ruleID := rule.Def.ID if rule.Def.GroupID != "" { ruleID = rule.Def.GroupID } - eventActionReports := e.GetActionReports() - actionReports := make([]model.ActionReport, 0, len(eventActionReports)) - for _, ar := range eventActionReports { - if ar.IsMatchingRule(rule.ID) { - actionReports = append(actionReports, ar) + // get type tags + container tags if already resolved, see ResolveContainerTags + eventTags := event.GetTags() + + tags := []string{"rule_id:" + ruleID} + tags = append(tags, rule.Tags...) + tags = append(tags, eventTags...) + tags = append(tags, common.QueryAccountIDTag()) + + // model event or custom event ? if model event use queuing so that tags and actions can be handled + if ev, ok := event.(*model.Event); ok { + //return serializers.MarshalEvent(ev, opts) + eventActionReports := ev.GetActionReports() + actionReports := make([]model.ActionReport, 0, len(eventActionReports)) + for _, ar := range eventActionReports { + if ar.IsMatchingRule(rule.ID) { + actionReports = append(actionReports, ar) + } } - } - msg := &pendingMsg{ - ruleID: ruleID, - backendEvent: backendEvent, - eventJSON: eventJSON, - extTagsCb: extTagsCb, - service: service, - sendAfter: time.Now().Add(retention), - tags: make([]string, 0, 1+len(rule.Tags)+len(eventTags)+1), - actionReports: actionReports, - } + msg := &pendingMsg{ + ruleID: ruleID, + backendEvent: backendEvent, + eventSerializer: serializers.NewEventSerializer(ev, rule.Opts), + extTagsCb: extTagsCb, + service: service, + sendAfter: time.Now().Add(retention), + tags: tags, + actionReports: actionReports, + } - msg.tags = append(msg.tags, "rule_id:"+ruleID) - msg.tags = append(msg.tags, rule.Tags...) - msg.tags = append(msg.tags, eventTags...) 
- msg.tags = append(msg.tags, common.QueryAccountIDTag()) + a.enqueue(msg) + } else { + var ( + backendEventJSON []byte + eventJSON []byte + err error + ) + backendEventJSON, err = easyjson.Marshal(backendEvent) + if err != nil { + seclog.Errorf("failed to marshal event: %v", err) + } - a.enqueue(msg) -} + if ev, ok := event.(events.EventMarshaler); ok { + if eventJSON, err = ev.ToJSON(); err != nil { + seclog.Errorf("failed to marshal event: %v", err) + return + } + } else { + if eventJSON, err = json.Marshal(event); err != nil { + seclog.Errorf("failed to marshal event: %v", err) + return + } + } -func marshalEvent(event events.Event, opts *eval.Opts) ([]byte, error) { - if ev, ok := event.(*model.Event); ok { - return serializers.MarshalEvent(ev, opts) - } + data := mergeJSON(backendEventJSON, eventJSON) - if ev, ok := event.(events.EventMarshaler); ok { - return ev.ToJSON() - } + seclog.Tracef("Sending event message for rule `%s` to security-agent `%s`", ruleID, string(data)) - return json.Marshal(event) + m := &api.SecurityEventMessage{ + RuleID: ruleID, + Data: data, + Service: service, + Tags: tags, + } + a.updateMsgTags(m) + + a.msgSender.Send(m, a.expireEvent) + } } // expireEvent updates the count of expired messages for the appropriate rule @@ -489,7 +541,7 @@ func (a *APIServer) SetCWSConsumer(consumer *CWSConsumer) { } // NewAPIServer returns a new gRPC event server -func NewAPIServer(cfg *config.RuntimeSecurityConfig, probe *sprobe.Probe, msgSender MsgSender, client statsd.ClientInterface, selfTester *selftests.SelfTester) *APIServer { +func NewAPIServer(cfg *config.RuntimeSecurityConfig, probe *sprobe.Probe, msgSender MsgSender, client statsd.ClientInterface, selfTester *selftests.SelfTester) (*APIServer, error) { stopper := startstop.NewSerialStopper() as := &APIServer{ @@ -508,7 +560,7 @@ func NewAPIServer(cfg *config.RuntimeSecurityConfig, probe *sprobe.Probe, msgSen } if as.msgSender == nil { - if pkgconfig.SystemProbe().GetBool("runtime_security_config.direct_send_from_system_probe") { + if pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.direct_send_from_system_probe") { msgSender, err := NewDirectMsgSender(stopper) if err != nil { log.Errorf("failed to setup direct reporter: %v", err) @@ -522,5 +574,13 @@ func NewAPIServer(cfg *config.RuntimeSecurityConfig, probe *sprobe.Probe, msgSen } } - return as + if env.IsECS() || env.IsECSFargate() { + tags, err := getCurrentECSTaskTags() + if err != nil { + return nil, err + } + as.ecsTags = tags + } + + return as, nil } diff --git a/pkg/security/module/server_linux.go b/pkg/security/module/server_linux.go index c4d1d9556cd11..8792568894686 100644 --- a/pkg/security/module/server_linux.go +++ b/pkg/security/module/server_linux.go @@ -46,6 +46,24 @@ func (a *APIServer) DumpProcessCache(_ context.Context, params *api.DumpProcessC }, nil } +// DumpActivity handles an activity dump request +func (a *APIServer) DumpActivity(_ context.Context, params *api.ActivityDumpParams) (*api.ActivityDumpMessage, error) { + p, ok := a.probe.PlatformProbe.(*probe.EBPFProbe) + if !ok { + return nil, fmt.Errorf("not supported") + } + + if managers := p.GetProfileManagers(); managers != nil { + msg, err := managers.DumpActivity(params) + if err != nil { + seclog.Errorf("%s", err.Error()) + } + return msg, nil + } + + return nil, fmt.Errorf("monitor not configured") +} + // ListActivityDumps returns the list of active dumps func (a *APIServer) ListActivityDumps(_ context.Context, params *api.ActivityDumpListParams) 
(*api.ActivityDumpListMessage, error) { p, ok := a.probe.PlatformProbe.(*probe.EBPFProbe) diff --git a/pkg/security/probe/actions.go b/pkg/security/probe/actions.go index 12ecf8d37987d..4f1ac2a46621e 100644 --- a/pkg/security/probe/actions.go +++ b/pkg/security/probe/actions.go @@ -28,10 +28,10 @@ type KillActionReport struct { DetectedAt time.Time KilledAt time.Time ExitedAt time.Time - Rule *rules.Rule // internal resolved bool + rule *rules.Rule } // JKillActionReport used to serialize date @@ -47,13 +47,19 @@ type JKillActionReport struct { TTR string `json:"ttr,omitempty"` } -// ToJSON marshal the action -func (k *KillActionReport) ToJSON() ([]byte, bool, error) { +// IsResolved return if the action is resolved +func (k *KillActionReport) IsResolved() bool { k.RLock() defer k.RUnlock() // for sigkill wait for exit - resolved := k.Signal != "SIGKILL" || k.resolved + return k.Signal != "SIGKILL" || k.resolved +} + +// ToJSON marshal the action +func (k *KillActionReport) ToJSON() ([]byte, error) { + k.RLock() + defer k.RUnlock() jk := JKillActionReport{ Type: rules.KillAction, @@ -71,10 +77,10 @@ func (k *KillActionReport) ToJSON() ([]byte, bool, error) { data, err := utils.MarshalEasyJSON(jk) if err != nil { - return nil, false, err + return nil, err } - return data, resolved, nil + return data, nil } // IsMatchingRule returns true if this action report is targeted at the given rule ID @@ -82,5 +88,5 @@ func (k *KillActionReport) IsMatchingRule(ruleID eval.RuleID) bool { k.RLock() defer k.RUnlock() - return k.Rule.ID == ruleID + return k.rule.ID == ruleID } diff --git a/pkg/security/probe/actions_linux.go b/pkg/security/probe/actions_linux.go new file mode 100644 index 0000000000000..54126bb2b12f5 --- /dev/null +++ b/pkg/security/probe/actions_linux.go @@ -0,0 +1,91 @@ +//go:generate go run github.com/mailru/easyjson/easyjson -gen_build_flags=-mod=mod -no_std_marshalers -build_tags linux $GOFILE + +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
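The action-report contract changes here from a single ToJSON returning (data, resolved, err) to separate IsResolved and ToJSON calls, plus an optional serializers.EventSerializerPatcher so a report such as the HashActionReport introduced just below can patch the computed hashes back into the event serializer before the message goes out. The following self-contained sketch uses local stand-in types (not the real model.ActionReport or serializers packages) to show the consumer-side flow that pendingMsg.isResolved and pendingMsg.toJSON implement above:

// Sketch with stand-in types: poll reports for resolution, let them patch the event,
// then collect their JSON, which is roughly what pendingMsg does in server.go.
package main

import (
	"encoding/json"
	"fmt"
)

type eventSerializer struct{ Hashes []string } // stand-in for serializers.EventSerializer

// actionReport mirrors the calls this patch makes on model.ActionReport.
type actionReport interface {
	IsResolved() bool
	ToJSON() ([]byte, error)
}

// eventPatcher mirrors serializers.EventSerializerPatcher.
type eventPatcher interface{ PatchEvent(ev *eventSerializer) }

// hashReport is a toy stand-in for probe.HashActionReport.
type hashReport struct{ hashes []string }

func (h *hashReport) IsResolved() bool               { return len(h.hashes) > 0 }
func (h *hashReport) ToJSON() ([]byte, error)        { return json.Marshal(map[string]string{"type": "hash"}) }
func (h *hashReport) PatchEvent(ev *eventSerializer) { ev.Hashes = h.hashes }

// collectRuleActions runs once every report is resolved (or the retry budget is spent):
// patch the serializer first, then append each report's JSON to the rule actions.
func collectRuleActions(ev *eventSerializer, reports []actionReport) []json.RawMessage {
	var actions []json.RawMessage
	for _, r := range reports {
		if p, ok := r.(eventPatcher); ok {
			p.PatchEvent(ev)
		}
		if data, err := r.ToJSON(); err == nil && len(data) > 0 {
			actions = append(actions, data)
		}
	}
	return actions
}

func main() {
	ev := &eventSerializer{}
	rep := &hashReport{hashes: []string{"sha256:0f2a"}}
	actions := collectRuleActions(ev, []actionReport{rep})
	fmt.Println(rep.IsResolved(), len(actions), ev.Hashes)
}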
+ +// Package probe holds probe related files +package probe + +import ( + "sync" + "time" + + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" + "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" + "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" + "github.com/DataDog/datadog-agent/pkg/security/serializers" + "github.com/DataDog/datadog-agent/pkg/security/utils" +) + +const ( + // HashTriggerTimeout hash triggered because of a timeout + HashTriggerTimeout = "timeout" + // HashTriggerProcessExit hash triggered on process exit + HashTriggerProcessExit = "process_exit" +) + +// HashActionReport defines a hash action reports +// easyjson:json +type HashActionReport struct { + sync.RWMutex + + Type string `json:"type"` + Path string `json:"path"` + State string `json:"state"` + Trigger string `json:"trigger"` + + // internal + resolved bool + rule *rules.Rule + pid uint32 + seenAt time.Time + fileEvent model.FileEvent + crtID containerutils.ContainerID + eventType model.EventType +} + +// IsResolved return if the action is resolved +func (k *HashActionReport) IsResolved() bool { + k.RLock() + defer k.RUnlock() + + return k.resolved +} + +// ToJSON marshal the action +func (k *HashActionReport) ToJSON() ([]byte, error) { + k.Lock() + defer k.Unlock() + + k.Type = rules.HashAction + k.Path = k.fileEvent.PathnameStr + k.State = k.fileEvent.HashState.String() + + data, err := utils.MarshalEasyJSON(k) + if err != nil { + return nil, err + } + + return data, nil +} + +// IsMatchingRule returns true if this action report is targeted at the given rule ID +func (k *HashActionReport) IsMatchingRule(ruleID eval.RuleID) bool { + k.RLock() + defer k.RUnlock() + + return k.rule.ID == ruleID +} + +// PatchEvent implements the EventSerializerPatcher interface +func (k *HashActionReport) PatchEvent(ev *serializers.EventSerializer) { + if ev.FileEventSerializer == nil { + return + } + + ev.FileEventSerializer.HashState = k.fileEvent.HashState.String() + ev.FileEventSerializer.Hashes = k.fileEvent.Hashes +} diff --git a/pkg/security/probe/actions_linux_easyjson.go b/pkg/security/probe/actions_linux_easyjson.go new file mode 100644 index 0000000000000..5ab1d450c2823 --- /dev/null +++ b/pkg/security/probe/actions_linux_easyjson.go @@ -0,0 +1,95 @@ +//go:build linux +// +build linux + +// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT. 
+ +package probe + +import ( + json "encoding/json" + easyjson "github.com/mailru/easyjson" + jlexer "github.com/mailru/easyjson/jlexer" + jwriter "github.com/mailru/easyjson/jwriter" +) + +// suppress unused package warning +var ( + _ *json.RawMessage + _ *jlexer.Lexer + _ *jwriter.Writer + _ easyjson.Marshaler +) + +func easyjson7cab6e30DecodeGithubComDataDogDatadogAgentPkgSecurityProbe(in *jlexer.Lexer, out *HashActionReport) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "type": + out.Type = string(in.String()) + case "path": + out.Path = string(in.String()) + case "state": + out.State = string(in.String()) + case "trigger": + out.Trigger = string(in.String()) + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson7cab6e30EncodeGithubComDataDogDatadogAgentPkgSecurityProbe(out *jwriter.Writer, in HashActionReport) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"type\":" + out.RawString(prefix[1:]) + out.String(string(in.Type)) + } + { + const prefix string = ",\"path\":" + out.RawString(prefix) + out.String(string(in.Path)) + } + { + const prefix string = ",\"state\":" + out.RawString(prefix) + out.String(string(in.State)) + } + { + const prefix string = ",\"trigger\":" + out.RawString(prefix) + out.String(string(in.Trigger)) + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v HashActionReport) MarshalEasyJSON(w *jwriter.Writer) { + easyjson7cab6e30EncodeGithubComDataDogDatadogAgentPkgSecurityProbe(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *HashActionReport) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson7cab6e30DecodeGithubComDataDogDatadogAgentPkgSecurityProbe(l, v) +} diff --git a/pkg/security/probe/config/config.go b/pkg/security/probe/config/config.go index 5ddd7ab15913c..879a231222153 100644 --- a/pkg/security/probe/config/config.go +++ b/pkg/security/probe/config/config.go @@ -14,8 +14,8 @@ import ( "time" sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/util/filesystem" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -141,7 +141,7 @@ type Config struct { // NewConfig returns a new Config object func NewConfig() (*Config, error) { - sysconfig.Adjust(coreconfig.SystemProbe()) + sysconfig.Adjust(pkgconfigsetup.SystemProbe()) setEnv() @@ -173,8 +173,8 @@ func NewConfig() (*Config, error) { SyscallsMonitorEnabled: getBool("syscalls_monitor.enabled"), // event server - SocketPath: coreconfig.SystemProbe().GetString(join(evNS, "socket")), - EventServerBurst: coreconfig.SystemProbe().GetInt(join(evNS, "event_server.burst")), + SocketPath: pkgconfigsetup.SystemProbe().GetString(join(evNS, "socket")), + EventServerBurst: pkgconfigsetup.SystemProbe().GetInt(join(evNS, "event_server.burst")), // runtime compilation RuntimeCompilationEnabled: getBool("runtime_compilation.enabled"), @@ -267,41 +267,41 @@ func getAllKeys(key string) (string, string) { func isSet(key string) bool { 
deprecatedKey, newKey := getAllKeys(key) - return coreconfig.SystemProbe().IsSet(deprecatedKey) || coreconfig.SystemProbe().IsSet(newKey) + return pkgconfigsetup.SystemProbe().IsSet(deprecatedKey) || pkgconfigsetup.SystemProbe().IsSet(newKey) } func getBool(key string) bool { deprecatedKey, newKey := getAllKeys(key) - if coreconfig.SystemProbe().IsSet(deprecatedKey) { + if pkgconfigsetup.SystemProbe().IsSet(deprecatedKey) { log.Warnf("%s has been deprecated: please set %s instead", deprecatedKey, newKey) - return coreconfig.SystemProbe().GetBool(deprecatedKey) + return pkgconfigsetup.SystemProbe().GetBool(deprecatedKey) } - return coreconfig.SystemProbe().GetBool(newKey) + return pkgconfigsetup.SystemProbe().GetBool(newKey) } func getInt(key string) int { deprecatedKey, newKey := getAllKeys(key) - if coreconfig.SystemProbe().IsSet(deprecatedKey) { + if pkgconfigsetup.SystemProbe().IsSet(deprecatedKey) { log.Warnf("%s has been deprecated: please set %s instead", deprecatedKey, newKey) - return coreconfig.SystemProbe().GetInt(deprecatedKey) + return pkgconfigsetup.SystemProbe().GetInt(deprecatedKey) } - return coreconfig.SystemProbe().GetInt(newKey) + return pkgconfigsetup.SystemProbe().GetInt(newKey) } func getString(key string) string { deprecatedKey, newKey := getAllKeys(key) - if coreconfig.SystemProbe().IsSet(deprecatedKey) { + if pkgconfigsetup.SystemProbe().IsSet(deprecatedKey) { log.Warnf("%s has been deprecated: please set %s instead", deprecatedKey, newKey) - return coreconfig.SystemProbe().GetString(deprecatedKey) + return pkgconfigsetup.SystemProbe().GetString(deprecatedKey) } - return coreconfig.SystemProbe().GetString(newKey) + return pkgconfigsetup.SystemProbe().GetString(newKey) } func getStringSlice(key string) []string { deprecatedKey, newKey := getAllKeys(key) - if coreconfig.SystemProbe().IsSet(deprecatedKey) { + if pkgconfigsetup.SystemProbe().IsSet(deprecatedKey) { log.Warnf("%s has been deprecated: please set %s instead", deprecatedKey, newKey) - return coreconfig.SystemProbe().GetStringSlice(deprecatedKey) + return pkgconfigsetup.SystemProbe().GetStringSlice(deprecatedKey) } - return coreconfig.SystemProbe().GetStringSlice(newKey) + return pkgconfigsetup.SystemProbe().GetStringSlice(newKey) } diff --git a/pkg/security/probe/constantfetch/btfhub/constants.json b/pkg/security/probe/constantfetch/btfhub/constants.json index 6bd99c787014d..d0c78dd49e524 100644 --- a/pkg/security/probe/constantfetch/btfhub/constants.json +++ b/pkg/security/probe/constantfetch/btfhub/constants.json @@ -11855,6 +11855,13 @@ "uname_release": "4.14.350-266.564.amzn2.aarch64", "cindex": 3 }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.352-267.564.amzn2.aarch64", + "cindex": 3 + }, { "distrib": "amzn", "version": "2", @@ -12653,6 +12660,13 @@ "uname_release": "4.14.350-266.564.amzn2.x86_64", "cindex": 8 }, + { + "distrib": "amzn", + "version": "2", + "arch": "x86_64", + "uname_release": "4.14.352-267.564.amzn2.x86_64", + "cindex": 8 + }, { "distrib": "amzn", "version": "2", @@ -18239,6 +18253,13 @@ "uname_release": "4.14.35-2047.539.5.el7uek.aarch64", "cindex": 89 }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.540.4.1.el7uek.aarch64", + "cindex": 89 + }, { "distrib": "ol", "version": "7", @@ -19051,6 +19072,13 @@ "uname_release": "3.10.0-1160.119.1.0.2.el7.x86_64", "cindex": 93 }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-1160.119.1.0.3.el7.x86_64", + 
"cindex": 93 + }, { "distrib": "ol", "version": "7", @@ -20801,6 +20829,13 @@ "uname_release": "4.1.12-124.88.3.el7uek.x86_64", "cindex": 94 }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-124.89.4.el7uek.x86_64", + "cindex": 94 + }, { "distrib": "ol", "version": "7", @@ -23650,6 +23685,13 @@ "uname_release": "4.14.35-2047.540.3.el7uek.x86_64", "cindex": 96 }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.540.4.1.el7uek.x86_64", + "cindex": 96 + }, { "distrib": "ol", "version": "7", @@ -23657,6 +23699,27 @@ "uname_release": "4.14.35-2047.540.4.el7uek.x86_64", "cindex": 96 }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.541.1.el7uek.x86_64", + "cindex": 96 + }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.541.2.el7uek.x86_64", + "cindex": 96 + }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.541.3.el7uek.x86_64", + "cindex": 96 + }, { "distrib": "ol", "version": "7", diff --git a/pkg/security/probe/discarders_linux.go b/pkg/security/probe/discarders_linux.go index 8adc92a9737a5..30b4333d1995b 100644 --- a/pkg/security/probe/discarders_linux.go +++ b/pkg/security/probe/discarders_linux.go @@ -399,13 +399,14 @@ func (id *inodeDiscarders) discardParentInode(req *erpc.Request, rs *rules.RuleS parentKey := pathKey for i := 0; i < discarderDepth; i++ { - parentKey, err = id.dentryResolver.GetParent(parentKey) + key, err := id.dentryResolver.GetParent(parentKey) if err != nil || dentry.IsFakeInode(pathKey.Inode) { if i == 0 { return false, 0, 0, err } break } + parentKey = key } // do not insert multiple time the same discarder diff --git a/pkg/security/probe/field_handlers_ebpf.go b/pkg/security/probe/field_handlers_ebpf.go index b9cb03e12cfc7..6e391707cb57d 100644 --- a/pkg/security/probe/field_handlers_ebpf.go +++ b/pkg/security/probe/field_handlers_ebpf.go @@ -178,7 +178,10 @@ func (fh *EBPFFieldHandlers) ResolveMountRootPath(ev *model.Event, e *model.Moun func (fh *EBPFFieldHandlers) ResolveContainerContext(ev *model.Event) (*model.ContainerContext, bool) { if ev.ContainerContext.ContainerID != "" && !ev.ContainerContext.Resolved { if containerContext, _ := fh.resolvers.CGroupResolver.GetWorkload(string(ev.ContainerContext.ContainerID)); containerContext != nil { - ev.ContainerContext = &containerContext.ContainerContext + if containerContext.CGroupFlags.IsContainer() { + ev.ContainerContext = &containerContext.ContainerContext + } + ev.ContainerContext.Resolved = true } } @@ -187,23 +190,23 @@ func (fh *EBPFFieldHandlers) ResolveContainerContext(ev *model.Event) (*model.Co // ResolveContainerRuntime retrieves the container runtime managing the container func (fh *EBPFFieldHandlers) ResolveContainerRuntime(ev *model.Event, _ *model.ContainerContext) string { - if _, found := fh.ResolveContainerContext(ev); !found { - return "" + if ev.CGroupContext.CGroupFlags != 0 && ev.ContainerContext.ContainerID != "" { + return getContainerRuntime(ev.CGroupContext.CGroupFlags) } - return getContainerRuntime((ev.CGroupContext.CGroupFlags)) + return "" } // getContainerRuntime returns the container runtime managing the cgroup func getContainerRuntime(flags containerutils.CGroupFlags) string { - switch { - case (uint64(flags) & uint64(containerutils.CGroupManagerCRI)) != 0: + switch containerutils.CGroupManager(flags & containerutils.CGroupManagerMask) { + case 
containerutils.CGroupManagerCRI: return string(workloadmeta.ContainerRuntimeContainerd) - case (uint64(flags) & uint64(containerutils.CGroupManagerCRIO)) != 0: + case containerutils.CGroupManagerCRIO: return string(workloadmeta.ContainerRuntimeCRIO) - case (uint64(flags) & uint64(containerutils.CGroupManagerDocker)) != 0: + case containerutils.CGroupManagerDocker: return string(workloadmeta.ContainerRuntimeDocker) - case (uint64(flags) & uint64(containerutils.CGroupManagerPodman)) != 0: + case containerutils.CGroupManagerPodman: return string(workloadmeta.ContainerRuntimePodman) default: return "" @@ -239,12 +242,18 @@ func (fh *EBPFFieldHandlers) ResolveProcessArgv0(_ *model.Event, process *model. // ResolveProcessArgs resolves the args of the event func (fh *EBPFFieldHandlers) ResolveProcessArgs(ev *model.Event, process *model.Process) string { - return strings.Join(fh.ResolveProcessArgv(ev, process), " ") + if process.Args == "" { + process.Args = strings.Join(fh.ResolveProcessArgv(ev, process), " ") + } + return process.Args } // ResolveProcessArgsScrubbed resolves the args of the event func (fh *EBPFFieldHandlers) ResolveProcessArgsScrubbed(ev *model.Event, process *model.Process) string { - return strings.Join(fh.ResolveProcessArgvScrubbed(ev, process), " ") + if process.ArgsScrubbed == "" { + process.ArgsScrubbed = strings.Join(fh.ResolveProcessArgvScrubbed(ev, process), " ") + } + return process.ArgsScrubbed } // ResolveProcessArgv resolves the unscrubbed args of the process as an array. Use with caution. @@ -552,7 +561,11 @@ func (fh *EBPFFieldHandlers) ResolveCGroupManager(ev *model.Event, _ *model.CGro func (fh *EBPFFieldHandlers) ResolveContainerID(ev *model.Event, e *model.ContainerContext) string { if len(e.ContainerID) == 0 { if entry, _ := fh.ResolveProcessCacheEntry(ev); entry != nil { - e.ContainerID = containerutils.ContainerID(entry.ContainerID) + if entry.CGroup.CGroupFlags.IsContainer() { + e.ContainerID = containerutils.ContainerID(entry.ContainerID) + } else { + e.ContainerID = "" + } return string(e.ContainerID) } } diff --git a/pkg/security/probe/file_hasher.go b/pkg/security/probe/file_hasher.go new file mode 100644 index 0000000000000..372e15ea2ba90 --- /dev/null +++ b/pkg/security/probe/file_hasher.go @@ -0,0 +1,123 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux + +// Package probe holds probe related files +package probe + +import ( + "slices" + "sync" + "time" + + "github.com/DataDog/datadog-agent/pkg/security/config" + "github.com/DataDog/datadog-agent/pkg/security/resolvers/hash" + "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" + "github.com/DataDog/datadog-agent/pkg/security/utils" +) + +const ( + defaultHashActionFlushDelay = 5 * time.Second +) + +// FileHasher defines a file hasher structure +type FileHasher struct { + sync.Mutex + + cfg *config.Config + resolver *hash.Resolver + + pendingReports []*HashActionReport +} + +// NewFileHasher returns a new FileHasher +func NewFileHasher(cfg *config.Config, resolver *hash.Resolver) *FileHasher { + return &FileHasher{ + cfg: cfg, + resolver: resolver, + } +} + +// AddPendingReports adds a pending report +func (p *FileHasher) AddPendingReports(report *HashActionReport) { + p.Lock() + defer p.Unlock() + + p.pendingReports = append(p.pendingReports, report) +} + +func (p *FileHasher) hash(report *HashActionReport) { + p.resolver.HashFileEvent(report.eventType, report.crtID, report.pid, &report.fileEvent) + report.resolved = true +} + +// FlushPendingReports flushes pending reports +func (p *FileHasher) FlushPendingReports() { + p.Lock() + defer p.Unlock() + + p.pendingReports = slices.DeleteFunc(p.pendingReports, func(report *HashActionReport) bool { + report.Lock() + defer report.Unlock() + + if time.Now().After(report.seenAt.Add(defaultHashActionFlushDelay)) { + report.Trigger = HashTriggerTimeout + p.hash(report) + return true + } + return false + }) +} + +// HandleProcessExited handles process exited events +func (p *FileHasher) HandleProcessExited(event *model.Event) { + p.Lock() + defer p.Unlock() + + p.pendingReports = slices.DeleteFunc(p.pendingReports, func(report *HashActionReport) bool { + report.Lock() + defer report.Unlock() + + if report.pid == event.ProcessContext.Pid { + report.Trigger = HashTriggerProcessExit + p.hash(report) + return true + } + return false + }) +} + +// HashAndReport hashes and reports, returning true if the hash computation is supported for the given event +func (p *FileHasher) HashAndReport(rule *rules.Rule, ev *model.Event) bool { + eventType := ev.GetEventType() + + if !p.cfg.RuntimeSecurity.HashResolverEnabled { + return false + } + + // only open and exec events are supported + if eventType != model.FileOpenEventType && eventType != model.ExecEventType { + return false + } + + if ev.ProcessContext.Pid == utils.Getpid() { + return false + } + + report := &HashActionReport{ + rule: rule, + pid: ev.ProcessContext.Pid, + crtID: ev.ProcessContext.ContainerID, + seenAt: ev.Timestamp, + fileEvent: ev.Open.File, + eventType: eventType, + } + ev.ActionReports = append(ev.ActionReports, report) + p.pendingReports = append(p.pendingReports, report) + + return true +} diff --git a/pkg/security/probe/kfilters/approvers.go b/pkg/security/probe/kfilters/approvers.go index f902b9ccd0e83..1314a54e958bb 100644 --- a/pkg/security/probe/kfilters/approvers.go +++ b/pkg/security/probe/kfilters/approvers.go @@ -17,19 +17,29 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) -// BasenameApproverKernelMapName defines the basename approver kernel map name -const BasenameApproverKernelMapName = "basename_approvers" +const ( + // BasenameApproverKernelMapName defines the basename approver kernel map name + BasenameApproverKernelMapName = "basename_approvers" + + // BasenameApproverType is
the type of basename approver + BasenameApproverType = "basename" + // FlagApproverType is the type of flags approver + FlagApproverType = "flag" + // AUIDApproverType is the type of auid approver + AUIDApproverType = "auid" +) -type kfiltersGetter func(approvers rules.Approvers) (ActiveKFilters, error) +type kfiltersGetter func(approvers rules.Approvers) (ActiveKFilters, []eval.Field, error) // KFilterGetters var contains all the kfilter getters var KFilterGetters = make(map[eval.EventType]kfiltersGetter) func newBasenameKFilter(tableName string, eventType model.EventType, basename string) (activeKFilter, error) { return &eventMaskEntry{ - tableName: tableName, - tableKey: ebpf.NewStringMapItem(basename, BasenameFilterSize), - eventMask: uint64(1 << (eventType - 1)), + approverType: BasenameApproverType, + tableName: tableName, + tableKey: ebpf.NewStringMapItem(basename, BasenameFilterSize), + eventMask: uint64(1 << (eventType - 1)), }, nil } @@ -59,10 +69,11 @@ func newKFilterWithUInt32Flags(tableName string, flags ...uint32) (activeKFilter } return &arrayEntry{ - tableName: tableName, - index: uint32(0), - value: ebpf.NewUint32FlagsMapItem(bitmask), - zeroValue: ebpf.Uint32FlagsZeroMapItem, + approverType: FlagApproverType, + tableName: tableName, + index: uint32(0), + value: ebpf.NewUint32FlagsMapItem(bitmask), + zeroValue: ebpf.Uint32FlagsZeroMapItem, }, nil } @@ -73,10 +84,11 @@ func newKFilterWithUInt64Flags(tableName string, flags ...uint64) (activeKFilter } return &arrayEntry{ - tableName: tableName, - index: uint32(0), - value: ebpf.NewUint64FlagsMapItem(bitmask), - zeroValue: ebpf.Uint64FlagsZeroMapItem, + approverType: FlagApproverType, + tableName: tableName, + index: uint32(0), + value: ebpf.NewUint64FlagsMapItem(bitmask), + zeroValue: ebpf.Uint64FlagsZeroMapItem, }, nil } @@ -92,7 +104,9 @@ func getEnumsKFilters(tableName string, enums ...uint64) (activeKFilter, error) return newKFilterWithUInt64Flags(tableName, flags...) } -func getBasenameKFilters(eventType model.EventType, field string, approvers rules.Approvers) ([]activeKFilter, error) { +func getBasenameKFilters(eventType model.EventType, field string, approvers rules.Approvers) ([]activeKFilter, []eval.Field, error) { + var fieldHandled []eval.Field + stringValues := func(fvs rules.FilterValues) []string { var values []string for _, v := range fvs { @@ -112,63 +126,66 @@ func getBasenameKFilters(eventType model.EventType, field string, approvers rule case prefix + model.NameSuffix: activeKFilters, err := newBasenameKFilters(BasenameApproverKernelMapName, eventType, stringValues(values)...) if err != nil { - return nil, err + return nil, nil, err } kfilters = append(kfilters, activeKFilters...) 
- + fieldHandled = append(fieldHandled, field) case prefix + model.PathSuffix: for _, value := range stringValues(values) { basename := path.Base(value) activeKFilter, err := newBasenameKFilter(BasenameApproverKernelMapName, eventType, basename) if err != nil { - return nil, err + return nil, nil, err } kfilters = append(kfilters, activeKFilter) } + fieldHandled = append(fieldHandled, field) } } - return kfilters, nil + return kfilters, fieldHandled, nil } -func basenameKFilterGetter(event model.EventType) kfiltersGetter { - return func(approvers rules.Approvers) (ActiveKFilters, error) { - basenameKFilters, err := getBasenameKFilters(event, "file", approvers) - if err != nil { - return nil, err - } - return newActiveKFilters(basenameKFilters...), nil - } -} +func fimKFiltersGetter(eventType model.EventType, fields []eval.Field) kfiltersGetter { + return func(approvers rules.Approvers) (ActiveKFilters, []eval.Field, error) { + var ( + kfilters []activeKFilter + fieldHandled []eval.Field + ) -func basenameskfiltersGetter(event model.EventType, field1, field2 string) kfiltersGetter { - return func(approvers rules.Approvers) (ActiveKFilters, error) { - basenameKFilters, err := getBasenameKFilters(event, field1, approvers) - if err != nil { - return nil, err + for _, field := range fields { + kfilter, handled, err := getBasenameKFilters(eventType, field, approvers) + if err != nil { + return nil, nil, err + } + kfilters = append(kfilters, kfilter...) + fieldHandled = append(fieldHandled, handled...) } - basenameKFilters2, err := getBasenameKFilters(event, field2, approvers) + + kfs, handled, err := getProcessKFilters(model.FileOpenEventType, approvers) if err != nil { - return nil, err + return nil, nil, err } - basenameKFilters = append(basenameKFilters, basenameKFilters2...) - return newActiveKFilters(basenameKFilters...), nil + kfilters = append(kfilters, kfs...) + fieldHandled = append(fieldHandled, handled...) 
+ + return newActiveKFilters(kfilters...), fieldHandled, nil } } func init() { - KFilterGetters["chmod"] = basenameKFilterGetter(model.FileChmodEventType) - KFilterGetters["chown"] = basenameKFilterGetter(model.FileChownEventType) - KFilterGetters["link"] = basenameskfiltersGetter(model.FileLinkEventType, "file", "file.destination") - KFilterGetters["mkdir"] = basenameKFilterGetter(model.FileMkdirEventType) - KFilterGetters["open"] = openOnNewApprovers - KFilterGetters["rename"] = basenameskfiltersGetter(model.FileRenameEventType, "file", "file.destination") - KFilterGetters["rmdir"] = basenameKFilterGetter(model.FileRmdirEventType) - KFilterGetters["unlink"] = basenameKFilterGetter(model.FileUnlinkEventType) - KFilterGetters["utimes"] = basenameKFilterGetter(model.FileUtimesEventType) - KFilterGetters["mmap"] = mmapKFilters - KFilterGetters["mprotect"] = mprotectKFilters - KFilterGetters["splice"] = spliceKFilters - KFilterGetters["chdir"] = basenameKFilterGetter(model.FileChdirEventType) - KFilterGetters["bpf"] = bpfKFilters + KFilterGetters["chmod"] = fimKFiltersGetter(model.FileChmodEventType, []eval.Field{"file"}) + KFilterGetters["chown"] = fimKFiltersGetter(model.FileChownEventType, []eval.Field{"file"}) + KFilterGetters["link"] = fimKFiltersGetter(model.FileLinkEventType, []eval.Field{"file", "file.destination"}) + KFilterGetters["mkdir"] = fimKFiltersGetter(model.FileMkdirEventType, []eval.Field{"file"}) + KFilterGetters["open"] = openKFiltersGetter + KFilterGetters["rename"] = fimKFiltersGetter(model.FileRenameEventType, []eval.Field{"file", "file.destination"}) + KFilterGetters["rmdir"] = fimKFiltersGetter(model.FileRmdirEventType, []eval.Field{"file"}) + KFilterGetters["unlink"] = fimKFiltersGetter(model.FileUnlinkEventType, []eval.Field{"file"}) + KFilterGetters["utimes"] = fimKFiltersGetter(model.FileUtimesEventType, []eval.Field{"file"}) + KFilterGetters["mmap"] = mmapKFiltersGetter + KFilterGetters["mprotect"] = mprotectKFiltersGetter + KFilterGetters["splice"] = spliceKFiltersGetter + KFilterGetters["chdir"] = fimKFiltersGetter(model.FileChdirEventType, []eval.Field{"file"}) + KFilterGetters["bpf"] = bpfKFiltersGetter } diff --git a/pkg/security/probe/kfilters/approvers_test.go b/pkg/security/probe/kfilters/approvers_test.go index b0d867f88f14b..0298b2b972f67 100644 --- a/pkg/security/probe/kfilters/approvers_test.go +++ b/pkg/security/probe/kfilters/approvers_test.go @@ -11,6 +11,7 @@ package kfilters import ( "testing" + "github.com/DataDog/datadog-agent/pkg/security/ebpf" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" @@ -82,3 +83,116 @@ func TestApproverGlob(t *testing.T) { t.Fatalf("expected approver not found: %v", values) } } + +func TestApproverFlags(t *testing.T) { + enabled := map[eval.EventType]bool{"*": true} + + ruleOpts, evalOpts := rules.NewBothOpts(enabled) + + rs := rules.NewRuleSet(&model.Model{}, newFakeEvent, ruleOpts, evalOpts) + rules.AddTestRuleExpr(t, rs, `open.flags & (O_SYNC | O_NOCTTY) > 0`) + capabilities, exists := allCapabilities["open"] + if !exists { + t.Fatal("no capabilities for open") + } + approvers, err := rs.GetEventTypeApprovers("open", capabilities) + if err != nil { + t.Fatal(err) + } + if values, exists := approvers["open.flags"]; !exists || len(values) != 1 { + t.Fatalf("expected approver not found: %v", values) + } +} + +func TestApproverWildcardBasename(t *testing.T) { + enabled := 
map[eval.EventType]bool{"*": true} + + ruleOpts, evalOpts := rules.NewBothOpts(enabled) + + rs := rules.NewRuleSet(&model.Model{}, newFakeEvent, ruleOpts, evalOpts) + rules.AddTestRuleExpr(t, rs, `open.file.path =~ "/var/run/secrets/*"`) + capabilities, exists := allCapabilities["open"] + if !exists { + t.Fatal("no capabilities for open") + } + approvers, err := rs.GetEventTypeApprovers("open", capabilities) + if err != nil { + t.Fatal(err) + } + if values, exists := approvers["open.file.path"]; exists || len(values) != 0 { + t.Fatalf("unexpected approver found: %v", values) + } +} + +func TestApproverAUIDRange(t *testing.T) { + enabled := map[eval.EventType]bool{"*": true} + + ruleOpts, evalOpts := rules.NewBothOpts(enabled) + + assert := func(t *testing.T, ruleDefs []string, min, max uint32) { + t.Helper() + + rs := rules.NewRuleSet(&model.Model{}, newFakeEvent, ruleOpts, evalOpts) + rules.AddTestRuleExpr(t, rs, ruleDefs...) + + capabilities, exists := allCapabilities["open"] + if !exists { + t.Fatal("no capabilities for open") + } + approvers, err := rs.GetEventTypeApprovers("open", capabilities) + if err != nil { + t.Fatal(err) + } + if values, exists := approvers["process.auid"]; !exists { + t.Fatalf("expected approver not found: %+v", values) + } + + kfilters, _, err := KFilterGetters["open"](approvers) + if err != nil { + t.Fatal(err) + } + if len(kfilters) != 1 { + + if min != 0 && max != 0 { + t.Fatalf("expected kfilter not found: %+v", kfilters) + } else { + // no kfilter expected + return + } + } + + key := makeEntryKey(auidRangeApproversTable, model.FileOpenEventType) + entry := kfilters[key] + if entry == nil { + t.Fatalf("expected kfilter not found: %+v => %+v", key, kfilters) + } + + value := entry.(*hashEntry).value.(*ebpf.UInt32RangeMapItem) + if value.Min != min || value.Max != max { + t.Fatalf("expected kfilter not found: %+v => %+v", kfilters, value) + } + } + + assert(t, []string{`open.file.path =~ "/tmp/*" && process.auid > 1000 && process.auid < 2000`}, 0, maxAUID) + assert(t, []string{`open.file.path =~ "/tmp/*" && process.auid > 1000`}, 1001, maxAUID) + assert(t, []string{`open.file.path =~ "/tmp/*" && process.auid < 1000`}, 0, 999) + assert(t, []string{`open.file.path =~ "/tmp/*" && process.auid >= 1000 && process.auid <= 2000`}, 0, maxAUID) + assert(t, []string{`open.file.path =~ "/tmp/*" && process.auid >= 1000`}, 1000, maxAUID) + assert(t, []string{`open.file.path =~ "/tmp/*" && process.auid <= 1000`}, 0, 1000) + + assert(t, []string{ + `open.file.path =~ "/tmp/*" && process.auid > 1000`, + `open.file.path =~ "/tmp/*" && process.auid < 500`, + }, 0, maxAUID) + assert(t, []string{ + `open.file.path =~ "/tmp/*" && process.auid >= 1000`, + `open.file.path =~ "/tmp/*" && process.auid > 1500`, + }, 1000, maxAUID) + assert(t, []string{ + `open.file.path =~ "/tmp/*" && process.auid < 1000`, + `open.file.path =~ "/tmp/*" && process.auid < 500`, + }, 0, 999) + assert(t, []string{ + `open.file.path =~ "/tmp/*" && process.auid != AUDIT_AUID_UNSET`, + }, 0, maxAUID) +} diff --git a/pkg/security/probe/kfilters/bpf.go b/pkg/security/probe/kfilters/bpf.go index df39a91182744..3b17b394a3097 100644 --- a/pkg/security/probe/kfilters/bpf.go +++ b/pkg/security/probe/kfilters/bpf.go @@ -9,8 +9,6 @@ package kfilters import ( - "fmt" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) @@ -22,20 +20,22 @@ var bpfCapabilities = rules.FieldCapabilities{ }, } -func bpfKFilters(approvers rules.Approvers) 
(ActiveKFilters, error) { - var bpfKFilters []activeKFilter +func bpfKFiltersGetter(approvers rules.Approvers) (ActiveKFilters, []eval.Field, error) { + var ( + kfilters []activeKFilter + fieldHandled []eval.Field + ) for field, values := range approvers { switch field { case "bpf.cmd": kfilter, err := getEnumsKFilters("bpf_cmd_approvers", uintValues[uint64](values)...) if err != nil { - return nil, err + return nil, nil, err } - bpfKFilters = append(bpfKFilters, kfilter) - default: - return nil, fmt.Errorf("unknown field '%s'", field) + kfilters = append(kfilters, kfilter) + fieldHandled = append(fieldHandled, field) } } - return newActiveKFilters(bpfKFilters...), nil + return newActiveKFilters(kfilters...), fieldHandled, nil } diff --git a/pkg/security/probe/kfilters/capabilities_linux.go b/pkg/security/probe/kfilters/capabilities_linux.go index 187273f6066ce..75f6d00cf5036 100644 --- a/pkg/security/probe/kfilters/capabilities_linux.go +++ b/pkg/security/probe/kfilters/capabilities_linux.go @@ -29,56 +29,46 @@ func validateBasenameFilter(value rules.FilterValue) bool { return false } -func oneBasenameCapabilities(event string) rules.FieldCapabilities { - return rules.FieldCapabilities{ - { - Field: event + ".file.path", - TypeBitmask: eval.ScalarValueType | eval.PatternValueType | eval.GlobValueType, - ValidateFnc: validateBasenameFilter, - }, - { - Field: event + ".file.name", - TypeBitmask: eval.ScalarValueType, - }, +func buildBasenameCapabilities(event string, fields ...string) rules.FieldCapabilities { + var caps rules.FieldCapabilities + + for _, field := range fields { + caps = append(caps, rules.FieldCapabilities{ + { + Field: event + "." + field + ".path", + TypeBitmask: eval.ScalarValueType | eval.PatternValueType | eval.GlobValueType, + ValidateFnc: validateBasenameFilter, + }, + { + Field: event + "." + field + ".name", + TypeBitmask: eval.ScalarValueType, + }, + }...) } + return caps } -func twoBasenameCapabilities(event string, field1, field2 string) rules.FieldCapabilities { - return rules.FieldCapabilities{ - { - Field: event + "." + field1 + ".path", - TypeBitmask: eval.ScalarValueType | eval.GlobValueType, - ValidateFnc: validateBasenameFilter, - }, - { - Field: event + "." + field1 + ".name", - TypeBitmask: eval.ScalarValueType, - }, - { - Field: event + "." + field2 + ".path", - TypeBitmask: eval.ScalarValueType | eval.GlobValueType, - ValidateFnc: validateBasenameFilter, - }, - { - Field: event + "." + field2 + ".name", - TypeBitmask: eval.ScalarValueType, - }, +func mergeCapabilities(caps ...rules.FieldCapabilities) rules.FieldCapabilities { + var result rules.FieldCapabilities + for _, c := range caps { + result = append(result, c...) 
} + return result } func init() { - allCapabilities["chmod"] = oneBasenameCapabilities("chmod") - allCapabilities["chown"] = oneBasenameCapabilities("chown") - allCapabilities["link"] = twoBasenameCapabilities("link", "file", "file.destination") - allCapabilities["mkdir"] = oneBasenameCapabilities("mkdir") + allCapabilities["chmod"] = mergeCapabilities(buildBasenameCapabilities("chmod", "file"), processCapabilities) + allCapabilities["chown"] = mergeCapabilities(buildBasenameCapabilities("chown", "file"), processCapabilities) + allCapabilities["link"] = mergeCapabilities(buildBasenameCapabilities("link", "file", "file.destination"), processCapabilities) + allCapabilities["mkdir"] = mergeCapabilities(buildBasenameCapabilities("mkdir", "file"), processCapabilities) allCapabilities["open"] = openCapabilities - allCapabilities["rename"] = twoBasenameCapabilities("rename", "file", "file.destination") - allCapabilities["rmdir"] = oneBasenameCapabilities("rmdir") - allCapabilities["unlink"] = oneBasenameCapabilities("unlink") - allCapabilities["utimes"] = oneBasenameCapabilities("utimes") + allCapabilities["rename"] = mergeCapabilities(buildBasenameCapabilities("rename", "file", "file.destination"), processCapabilities) + allCapabilities["rmdir"] = mergeCapabilities(buildBasenameCapabilities("rmdir", "file"), processCapabilities) + allCapabilities["unlink"] = mergeCapabilities(buildBasenameCapabilities("unlink", "file"), processCapabilities) + allCapabilities["utimes"] = mergeCapabilities(buildBasenameCapabilities("utimes", "file"), processCapabilities) allCapabilities["mmap"] = mmapCapabilities allCapabilities["mprotect"] = mprotectCapabilities allCapabilities["splice"] = spliceCapabilities - allCapabilities["chdir"] = oneBasenameCapabilities("chdir") + allCapabilities["chdir"] = mergeCapabilities(buildBasenameCapabilities("chdir", "file"), processCapabilities) allCapabilities["bpf"] = bpfCapabilities } diff --git a/pkg/security/probe/kfilters/kfilters.go b/pkg/security/probe/kfilters/kfilters.go index 596d0fbd06772..b7288e6775a1e 100644 --- a/pkg/security/probe/kfilters/kfilters.go +++ b/pkg/security/probe/kfilters/kfilters.go @@ -12,7 +12,7 @@ type FilterPolicy struct { Mode PolicyMode } -// Bytes returns the binary representation of a FilterPolicy -func (f *FilterPolicy) Bytes() ([]byte, error) { +// MarshalBinary returns the binary representation of a FilterPolicy +func (f *FilterPolicy) MarshalBinary() ([]byte, error) { return []byte{uint8(f.Mode)}, nil } diff --git a/pkg/security/probe/kfilters/kfilters_bpf.go b/pkg/security/probe/kfilters/kfilters_bpf.go index da8a0c4251c3b..1714cb24439a4 100644 --- a/pkg/security/probe/kfilters/kfilters_bpf.go +++ b/pkg/security/probe/kfilters/kfilters_bpf.go @@ -21,6 +21,7 @@ type activeKFilter interface { Apply(*manager.Manager) error Key() interface{} GetTableName() string + GetApproverType() string } // ActiveKFilters defines kfilter map @@ -66,24 +67,43 @@ type entryKey struct { key interface{} } +func makeEntryKey(tableName string, tableKey interface{}) entryKey { + mb, ok := tableKey.(encoding.BinaryMarshaler) + if !ok { + return entryKey{ + tableName: tableName, + key: tableKey, + } + } + + data, _ := mb.MarshalBinary() + + return entryKey{ + tableName: tableName, + key: hex.EncodeToString(data), + } +} + type arrayEntry struct { - tableName string - index interface{} - value interface{} - zeroValue interface{} + approverType string + tableName string + index interface{} + value interface{} + zeroValue interface{} } func (e *arrayEntry) Key() 
interface{} { - return entryKey{ - tableName: e.tableName, - key: e.index, - } + return makeEntryKey(e.tableName, e.index) } func (e *arrayEntry) GetTableName() string { return e.tableName } +func (e *arrayEntry) GetApproverType() string { + return e.approverType +} + func (e *arrayEntry) Remove(manager *manager.Manager) error { table, err := managerhelper.Map(manager, e.tableName) if err != nil { @@ -101,32 +121,24 @@ func (e *arrayEntry) Apply(manager *manager.Manager) error { } type eventMaskEntry struct { - tableName string - tableKey interface{} - eventMask uint64 + approverType string + tableName string + tableKey interface{} + eventMask uint64 } func (e *eventMaskEntry) Key() interface{} { - mb, ok := e.tableKey.(encoding.BinaryMarshaler) - if !ok { - return entryKey{ - tableName: e.tableName, - key: e.tableKey, - } - } - - data, _ := mb.MarshalBinary() - - return entryKey{ - tableName: e.tableName, - key: hex.EncodeToString(data), - } + return makeEntryKey(e.tableName, e.tableKey) } func (e *eventMaskEntry) GetTableName() string { return e.tableName } +func (e *eventMaskEntry) GetApproverType() string { + return e.approverType +} + func (e *eventMaskEntry) Remove(manager *manager.Manager) error { table, err := managerhelper.Map(manager, e.tableName) if err != nil { @@ -154,3 +166,38 @@ func (e *eventMaskEntry) Apply(manager *manager.Manager) error { } return table.Put(e.tableKey, eventMask) } + +type hashEntry struct { + approverType string + tableName string + tableKey interface{} + value interface{} +} + +func (e *hashEntry) Key() interface{} { + return makeEntryKey(e.tableName, e.tableKey) +} + +func (e *hashEntry) GetTableName() string { + return e.tableName +} + +func (e *hashEntry) GetApproverType() string { + return e.approverType +} + +func (e *hashEntry) Remove(manager *manager.Manager) error { + table, err := managerhelper.Map(manager, e.tableName) + if err != nil { + return err + } + return table.Delete(e.tableKey) +} + +func (e *hashEntry) Apply(manager *manager.Manager) error { + table, err := managerhelper.Map(manager, e.tableName) + if err != nil { + return err + } + return table.Put(e.tableKey, e.value) +} diff --git a/pkg/security/probe/kfilters/mmap.go b/pkg/security/probe/kfilters/mmap.go index 130ed05e8a8a6..e17f2e60809be 100644 --- a/pkg/security/probe/kfilters/mmap.go +++ b/pkg/security/probe/kfilters/mmap.go @@ -9,8 +9,6 @@ package kfilters import ( - "fmt" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" @@ -36,30 +34,29 @@ var mmapCapabilities = rules.FieldCapabilities{ }, } -func mmapKFilters(approvers rules.Approvers) (ActiveKFilters, error) { - mmapKFilters, err := getBasenameKFilters(model.MMapEventType, "file", approvers) +func mmapKFiltersGetter(approvers rules.Approvers) (ActiveKFilters, []eval.Field, error) { + kfilters, fieldHandled, err := getBasenameKFilters(model.MMapEventType, "file", approvers) if err != nil { - return nil, err + return nil, nil, err } for field, values := range approvers { switch field { - case "mmap.file.name", "mmap.file.path": // already handled by getBasenameKFilters case "mmap.flags": kfilter, err := getFlagsKFilter("mmap_flags_approvers", uintValues[uint32](values)...) 
if err != nil { - return nil, err + return nil, nil, err } - mmapKFilters = append(mmapKFilters, kfilter) + kfilters = append(kfilters, kfilter) + fieldHandled = append(fieldHandled, field) case "mmap.protection": kfilter, err := getFlagsKFilter("mmap_protection_approvers", uintValues[uint32](values)...) if err != nil { - return nil, err + return nil, nil, err } - mmapKFilters = append(mmapKFilters, kfilter) - default: - return nil, fmt.Errorf("unknown field '%s'", field) + kfilters = append(kfilters, kfilter) + fieldHandled = append(fieldHandled, field) } } - return newActiveKFilters(mmapKFilters...), nil + return newActiveKFilters(kfilters...), fieldHandled, nil } diff --git a/pkg/security/probe/kfilters/mprotect.go b/pkg/security/probe/kfilters/mprotect.go index dc1c10be3532b..e721683c748cc 100644 --- a/pkg/security/probe/kfilters/mprotect.go +++ b/pkg/security/probe/kfilters/mprotect.go @@ -9,8 +9,6 @@ package kfilters import ( - "fmt" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) @@ -26,26 +24,29 @@ var mprotectCapabilities = rules.FieldCapabilities{ }, } -func mprotectKFilters(approvers rules.Approvers) (ActiveKFilters, error) { - var mprotectKFilters []activeKFilter +func mprotectKFiltersGetter(approvers rules.Approvers) (ActiveKFilters, []eval.Field, error) { + var ( + kfilters []activeKFilter + fieldHandled []eval.Field + ) for field, values := range approvers { switch field { case "mprotect.vm_protection": kfilter, err := getFlagsKFilter("mprotect_vm_protection_approvers", uintValues[uint32](values)...) if err != nil { - return nil, err + return nil, nil, err } - mprotectKFilters = append(mprotectKFilters, kfilter) + kfilters = append(kfilters, kfilter) + fieldHandled = append(fieldHandled, field) case "mprotect.req_protection": kfilter, err := getFlagsKFilter("mprotect_req_protection_approvers", uintValues[uint32](values)...) 
if err != nil { - return nil, err + return nil, nil, err } - mprotectKFilters = append(mprotectKFilters, kfilter) - default: - return nil, fmt.Errorf("unknown field '%s'", field) + kfilters = append(kfilters, kfilter) + fieldHandled = append(fieldHandled, field) } } - return newActiveKFilters(mprotectKFilters...), nil + return newActiveKFilters(kfilters...), fieldHandled, nil } diff --git a/pkg/security/probe/kfilters/open.go b/pkg/security/probe/kfilters/open.go index 5a4ad806bb8ec..412524252d119 100644 --- a/pkg/security/probe/kfilters/open.go +++ b/pkg/security/probe/kfilters/open.go @@ -9,52 +9,56 @@ package kfilters import ( - "fmt" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) -var openCapabilities = rules.FieldCapabilities{ - { - Field: "open.flags", - TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, - }, - { - Field: "open.file.path", - TypeBitmask: eval.ScalarValueType | eval.PatternValueType | eval.GlobValueType, - ValidateFnc: validateBasenameFilter, - FilterWeight: 15, +var openCapabilities = mergeCapabilities( + rules.FieldCapabilities{ + { + Field: "open.flags", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + }, + { + Field: "open.file.path", + TypeBitmask: eval.ScalarValueType | eval.PatternValueType | eval.GlobValueType, + ValidateFnc: validateBasenameFilter, + FilterWeight: 15, + }, + { + Field: "open.file.name", + TypeBitmask: eval.ScalarValueType, + FilterWeight: 300, + }, }, - { - Field: "open.file.name", - TypeBitmask: eval.ScalarValueType, - FilterWeight: 10, - }, -} + processCapabilities, +) -func openOnNewApprovers(approvers rules.Approvers) (ActiveKFilters, error) { - openKFilters, err := getBasenameKFilters(model.FileOpenEventType, "file", approvers) +func openKFiltersGetter(approvers rules.Approvers) (ActiveKFilters, []eval.Field, error) { + kfilters, fieldHandled, err := getBasenameKFilters(model.FileOpenEventType, "file", approvers) if err != nil { - return nil, err + return nil, nil, err } for field, values := range approvers { switch field { - case "open.file.name", "open.file.path": // already handled by getBasenameKFilters case "open.flags": kfilter, err := getFlagsKFilter("open_flags_approvers", uintValues[uint32](values)...) if err != nil { - return nil, err + return nil, nil, err } - openKFilters = append(openKFilters, kfilter) - - default: - return nil, fmt.Errorf("unknown field '%s'", field) + kfilters = append(kfilters, kfilter) + fieldHandled = append(fieldHandled, field) } + } + kfs, handled, err := getProcessKFilters(model.FileOpenEventType, approvers) + if err != nil { + return nil, nil, err } + kfilters = append(kfilters, kfs...) + fieldHandled = append(fieldHandled, handled...) - return newActiveKFilters(openKFilters...), nil + return newActiveKFilters(kfilters...), fieldHandled, nil } diff --git a/pkg/security/probe/kfilters/process.go b/pkg/security/probe/kfilters/process.go new file mode 100644 index 0000000000000..14d538765d882 --- /dev/null +++ b/pkg/security/probe/kfilters/process.go @@ -0,0 +1,95 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux + +// Package kfilters holds kfilters related files +package kfilters + +import ( + "github.com/DataDog/datadog-agent/pkg/security/ebpf" + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" + "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" +) + +const ( + auidField = "process.auid" + maxAUID = model.AuditUIDUnset - 1 + auidApproversTable = "auid_approvers" + auidRangeApproversTable = "auid_range_approvers" +) + +var processCapabilities = rules.FieldCapabilities{ + { + Field: "process.auid", + TypeBitmask: eval.ScalarValueType | eval.RangeValueType, + FilterMode: rules.ApproverOnlyMode, + RangeFilterValue: &rules.RangeFilterValue{Min: 0, Max: maxAUID}, + FilterWeight: 100, + // convert `!= model.AuditUIDUnset`` to the max range + HandleNotApproverValue: func(fieldValueType eval.FieldValueType, value interface{}) (eval.FieldValueType, interface{}, bool) { + if fieldValueType != eval.ScalarValueType { + return fieldValueType, value, false + } + + if i, ok := value.(int); ok && uint32(i) == model.AuditUIDUnset { + return eval.RangeValueType, rules.RangeFilterValue{Min: 0, Max: model.AuditUIDUnset - 1}, true + } + + return fieldValueType, value, false + }, + }, +} + +func getProcessKFilters(eventType model.EventType, approvers rules.Approvers) ([]activeKFilter, []eval.Field, error) { + var fieldHandled []eval.Field + + values, exists := approvers[auidField] + if !exists { + return nil, nil, nil + } + + var ( + kfilters []activeKFilter + auidRange = rules.RangeFilterValue{Min: 0, Max: maxAUID} + auidRangeSet bool + ) + + for _, value := range values { + switch value.Type { + case eval.ScalarValueType: + kfilters = append(kfilters, &eventMaskEntry{ + tableName: auidApproversTable, + tableKey: ebpf.Uint32MapItem(value.Value.(int)), + eventMask: uint64(1 << (eventType - 1)), + }) + case eval.RangeValueType: + min, max := value.Value.(rules.RangeFilterValue).Min, value.Value.(rules.RangeFilterValue).Max + if !auidRangeSet || auidRange.Min > min { + auidRange.Min = min + } + if !auidRangeSet || auidRange.Max < max { + auidRange.Max = max + } + auidRangeSet = true + } + } + + if auidRangeSet { + kfilters = append(kfilters, &hashEntry{ + approverType: AUIDApproverType, + tableName: auidRangeApproversTable, + tableKey: eventType, + value: ebpf.NewUInt32RangeMapItem(uint32(auidRange.Min), uint32(auidRange.Max)), + }) + } + + if len(kfilters) > 0 { + fieldHandled = append(fieldHandled, auidField) + } + + return kfilters, fieldHandled, nil +} diff --git a/pkg/security/probe/kfilters/splice.go b/pkg/security/probe/kfilters/splice.go index 2d6e9a4573ec8..bf49f7cb3b024 100644 --- a/pkg/security/probe/kfilters/splice.go +++ b/pkg/security/probe/kfilters/splice.go @@ -9,8 +9,6 @@ package kfilters import ( - "fmt" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" @@ -36,30 +34,29 @@ var spliceCapabilities = rules.FieldCapabilities{ }, } -func spliceKFilters(approvers rules.Approvers) (ActiveKFilters, error) { - spliceKFilters, err := getBasenameKFilters(model.SpliceEventType, "file", approvers) +func spliceKFiltersGetter(approvers rules.Approvers) (ActiveKFilters, []eval.Field, error) { + kfilters, fieldHandled, err := getBasenameKFilters(model.SpliceEventType, "file", approvers) if err != nil { - return nil, err + return nil, nil, err } for field, values := range 
approvers { switch field { - case "splice.file.name", "splice.file.path": // already handled by getBasenameKFilters case "splice.pipe_entry_flag": kfilter, err := getFlagsKFilter("splice_entry_flags_approvers", uintValues[uint32](values)...) if err != nil { - return nil, err + return nil, nil, err } - spliceKFilters = append(spliceKFilters, kfilter) + kfilters = append(kfilters, kfilter) + fieldHandled = append(fieldHandled, field) case "splice.pipe_exit_flag": kfilter, err := getFlagsKFilter("splice_exit_flags_approvers", uintValues[uint32](values)...) if err != nil { - return nil, err + return nil, nil, err } - spliceKFilters = append(spliceKFilters, kfilter) - default: - return nil, fmt.Errorf("unknown field '%s'", field) + kfilters = append(kfilters, kfilter) + fieldHandled = append(fieldHandled, field) } } - return newActiveKFilters(spliceKFilters...), nil + return newActiveKFilters(kfilters...), fieldHandled, nil } diff --git a/pkg/security/probe/monitors/approver/approver_monitor.go b/pkg/security/probe/monitors/approver/approver_monitor.go index 7f6443d1df63b..fc863bdfc07f8 100644 --- a/pkg/security/probe/monitors/approver/approver_monitor.go +++ b/pkg/security/probe/monitors/approver/approver_monitor.go @@ -14,6 +14,7 @@ import ( manager "github.com/DataDog/ebpf-manager" lib "github.com/cilium/ebpf" + "github.com/DataDog/datadog-agent/pkg/security/probe/kfilters" "github.com/DataDog/datadog-agent/pkg/security/probe/managerhelper" "github.com/DataDog/datadog-agent/pkg/security/utils" @@ -28,6 +29,7 @@ import ( type Stats struct { EventApprovedByBasename uint64 EventApprovedByFlag uint64 + EventApprovedByAUID uint64 } // Monitor defines an approver monitor @@ -58,26 +60,35 @@ func (d *Monitor) SendStats() error { for _, stat := range statsAcrossAllCPUs { statsByEventType[eventType].EventApprovedByBasename += stat.EventApprovedByBasename statsByEventType[eventType].EventApprovedByFlag += stat.EventApprovedByFlag + statsByEventType[eventType].EventApprovedByAUID += stat.EventApprovedByAUID } } for eventType, stats := range statsByEventType { - if stats.EventApprovedByBasename == 0 && stats.EventApprovedByFlag == 0 { - continue - } - eventTypeTag := fmt.Sprintf("event_type:%s", model.EventType(eventType).String()) - tagsForBasenameApprovedEvents := []string{ - "approver_type:basename", - eventTypeTag, + if stats.EventApprovedByBasename != 0 { + tagsForBasenameApprovedEvents := []string{ + "approver_type:" + kfilters.BasenameApproverType, + eventTypeTag, + } + _ = d.statsdClient.Count(metrics.MetricEventApproved, int64(stats.EventApprovedByBasename), tagsForBasenameApprovedEvents, 1.0) } - tagsForFlagApprovedEvents := []string{ - "approver_type:flag", - eventTypeTag, + + if stats.EventApprovedByFlag != 0 { + tagsForFlagApprovedEvents := []string{ + "approver_type:" + kfilters.FlagApproverType, + eventTypeTag, + } + _ = d.statsdClient.Count(metrics.MetricEventApproved, int64(stats.EventApprovedByFlag), tagsForFlagApprovedEvents, 1.0) } - _ = d.statsdClient.Count(metrics.MetricEventApproved, int64(stats.EventApprovedByBasename), tagsForBasenameApprovedEvents, 1.0) - _ = d.statsdClient.Count(metrics.MetricEventApproved, int64(stats.EventApprovedByFlag), tagsForFlagApprovedEvents, 1.0) + if stats.EventApprovedByAUID != 0 { + tagsForAUIDApprovedEvents := []string{ + "approver_type:" + kfilters.AUIDApproverType, + eventTypeTag, + } + _ = d.statsdClient.Count(metrics.MetricEventApproved, int64(stats.EventApprovedByAUID), tagsForAUIDApprovedEvents, 1.0) + } } for i := uint32(0); i != 
uint32(model.LastApproverEventType); i++ { _ = buffer.Put(i, d.statsZero) diff --git a/pkg/security/probe/opts_others.go b/pkg/security/probe/opts_others.go new file mode 100644 index 0000000000000..59cf8dccb1453 --- /dev/null +++ b/pkg/security/probe/opts_others.go @@ -0,0 +1,15 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !linux && !windows + +// Package probe holds probe related files +package probe + +// Opts defines some probe options +type Opts struct { + // DontDiscardRuntime do not discard the runtime. Mostly used by functional tests + DontDiscardRuntime bool +} diff --git a/pkg/security/probe/opts_windows.go b/pkg/security/probe/opts_windows.go index 7558f1758456b..8d079df75832f 100644 --- a/pkg/security/probe/opts_windows.go +++ b/pkg/security/probe/opts_windows.go @@ -14,6 +14,9 @@ import ( // Opts defines some probe options type Opts struct { + // DontDiscardRuntime do not discard the runtime. Mostly used by functional tests + DontDiscardRuntime bool + // StatsdClient to be used for probe stats StatsdClient statsd.ClientInterface diff --git a/pkg/security/probe/probe.go b/pkg/security/probe/probe.go index 549f56d5c2a0c..0f550b77dec2c 100644 --- a/pkg/security/probe/probe.go +++ b/pkg/security/probe/probe.go @@ -55,6 +55,8 @@ type PlatformProbe interface { AddDiscarderPushedCallback(_ DiscarderPushedCallback) GetEventTags(_ string) []string GetProfileManager() interface{} + EnableEnforcement(bool) + PlaySnapshot() } // EventHandler represents a handler for events sent by the probe that needs access to all the fields in the SECL model @@ -103,6 +105,11 @@ type CustomEventHandler interface { // DiscarderPushedCallback describe the callback used to retrieve pushed discarders information type DiscarderPushedCallback func(eventType string, event *model.Event, field string) +type actionStatsTags struct { + ruleID rules.RuleID + actionName rules.ActionName +} + // Probe represents the runtime security eBPF probe in charge of // setting up the required kProbes and decoding events sent from the kernel type Probe struct { @@ -125,14 +132,19 @@ type Probe struct { eventHandlers [model.MaxAllEventType][]EventHandler eventConsumers [model.MaxAllEventType][]*EventConsumer customEventHandlers [model.MaxAllEventType][]CustomEventHandler + + // stats + ruleActionStatsLock sync.RWMutex + ruleActionStats map[actionStatsTags]*atomic.Int64 } func newProbe(config *config.Config, opts Opts) *Probe { return &Probe{ - Opts: opts, - Config: config, - StatsdClient: opts.StatsdClient, - scrubber: newProcScrubber(config.Probe.CustomSensitiveWords), + Opts: opts, + Config: config, + StatsdClient: opts.StatsdClient, + scrubber: newProcScrubber(config.Probe.CustomSensitiveWords), + ruleActionStats: make(map[actionStatsTags]*atomic.Int64), } } @@ -179,6 +191,20 @@ func (p *Probe) SendStats() error { if err := p.sendConsumerStats(); err != nil { return err } + + p.ruleActionStatsLock.RLock() + for tags, counter := range p.ruleActionStats { + count := counter.Swap(0) + if count > 0 { + tags := []string{ + fmt.Sprintf("rule_id:%s", tags.ruleID), + fmt.Sprintf("action_name:%s", tags.actionName), + } + _ = p.StatsdClient.Count(metrics.MetricRuleActionPerformed, count, tags, 1.0) + } + } + p.ruleActionStatsLock.RUnlock() + return p.PlatformProbe.SendStats() } @@ -203,6 +229,9 @@ func (p *Probe) 
FlushDiscarders() error { // ApplyRuleSet setup the probes for the provided set of rules and returns the policy report. func (p *Probe) ApplyRuleSet(rs *rules.RuleSet) (*kfilters.ApplyRuleSetReport, error) { + p.ruleActionStatsLock.Lock() + clear(p.ruleActionStats) + p.ruleActionStatsLock.Unlock() return p.PlatformProbe.ApplyRuleSet(rs) } @@ -363,6 +392,24 @@ func (p *Probe) GetService(ev *model.Event) string { return p.Config.RuntimeSecurity.HostServiceName } +func (p *Probe) onRuleActionPerformed(rule *rules.Rule, action *rules.ActionDefinition) { + p.ruleActionStatsLock.Lock() + defer p.ruleActionStatsLock.Unlock() + + tags := actionStatsTags{ + ruleID: rule.ID, + actionName: action.Name(), + } + + var counter *atomic.Int64 + if counter = p.ruleActionStats[tags]; counter == nil { + counter = atomic.NewInt64(1) + p.ruleActionStats[tags] = counter + } else { + counter.Inc() + } +} + // NewRuleSet returns a new ruleset func (p *Probe) NewRuleSet(eventTypeEnabled map[eval.EventType]bool) *rules.RuleSet { ruleOpts, evalOpts := rules.NewBothOpts(eventTypeEnabled) @@ -370,6 +417,7 @@ func (p *Probe) NewRuleSet(eventTypeEnabled map[eval.EventType]bool) *rules.Rule ruleOpts.WithReservedRuleIDs(events.AllCustomRuleIDs()) ruleOpts.WithSupportedDiscarders(SupportedDiscarders) ruleOpts.WithSupportedMultiDiscarder(SupportedMultiDiscarder) + ruleOpts.WithRuleActionPerformedCb(p.onRuleActionPerformed) eventCtor := func() eval.Event { return p.PlatformProbe.NewEvent() @@ -397,3 +445,13 @@ func (p *Probe) IsActivityDumpTagRulesEnabled() bool { func (p *Probe) IsSecurityProfileEnabled() bool { return p.Config.RuntimeSecurity.SecurityProfileEnabled } + +// EnableEnforcement sets the enforcement mode +func (p *Probe) EnableEnforcement(state bool) { + p.PlatformProbe.EnableEnforcement(state) +} + +// PlaySnapshot plays the snapshot +func (p *Probe) PlaySnapshot() { + p.PlatformProbe.PlaySnapshot() +} diff --git a/pkg/security/probe/probe_ebpf.go b/pkg/security/probe/probe_ebpf.go index 86be48d856c76..af46bf5c0d97e 100644 --- a/pkg/security/probe/probe_ebpf.go +++ b/pkg/security/probe/probe_ebpf.go @@ -32,7 +32,6 @@ import ( "github.com/DataDog/ebpf-manager/tracefs" "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/config/env" ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" @@ -149,6 +148,9 @@ type EBPFProbe struct { // On demand onDemandManager *OnDemandProbesManager onDemandRateLimiter *rate.Limiter + + // hash action + fileHasher *FileHasher } // GetProfileManager returns the Profile Managers @@ -364,7 +366,7 @@ func (p *EBPFProbe) Setup() error { // Start the probe func (p *EBPFProbe) Start() error { // Apply rules to the snapshotted data before starting the event stream to avoid concurrency issues - p.playSnapshot() + p.PlaySnapshot() // start new tc classifier loop go p.startSetupNewTCClassifierLoop() @@ -372,8 +374,9 @@ func (p *EBPFProbe) Start() error { return p.eventStream.Start(&p.wg) } -// playSnapshot plays a snapshot -func (p *EBPFProbe) playSnapshot() { +// PlaySnapshot plays a snapshot +func (p *EBPFProbe) PlaySnapshot() { + seclog.Debugf("playing the snapshot") // Get the snapshotted data var events []*model.Event @@ -467,6 +470,8 @@ func (p *EBPFProbe) DispatchEvent(event *model.Event) { func (p *EBPFProbe) SendStats() error { p.Resolvers.TCResolver.SendTCProgramsStats(p.statsdClient) + 
p.processKiller.SendStats(p.statsdClient) + if err := p.profileManagers.SendStats(); err != nil { return err } @@ -644,6 +649,33 @@ func (p *EBPFProbe) handleEvent(CPU int, data []byte) { p.profileManagers.activityDumpManager.HandleCGroupTracingEvent(&event.CgroupTracing) return + case model.CgroupWriteEventType: + if _, err = event.CgroupWrite.UnmarshalBinary(data[offset:]); err != nil { + seclog.Errorf("failed to decode cgroup write released event: %s (offset %d, len %d)", err, offset, dataLen) + return + } + pce := p.Resolvers.ProcessResolver.Resolve(event.CgroupWrite.Pid, event.CgroupWrite.Pid, 0, false) + if pce != nil { + path, err := p.Resolvers.DentryResolver.Resolve(event.CgroupWrite.File.PathKey, true) + if err == nil && path != "" { + path = filepath.Dir(string(path)) + pce.CGroup.CGroupID = containerutils.CGroupID(path) + pce.Process.CGroup.CGroupID = containerutils.CGroupID(path) + cgroupFlags := containerutils.CGroupFlags(event.CgroupWrite.CGroupFlags) + if cgroupFlags.IsContainer() { + containerID, _ := containerutils.GetContainerFromCgroup(path) + pce.ContainerID = containerutils.ContainerID(containerID) + pce.Process.ContainerID = containerutils.ContainerID(containerID) + } + pce.CGroup.CGroupFlags = cgroupFlags + pce.Process.CGroup = pce.CGroup + } else { + seclog.Debugf("failed to resolve cgroup file %v", event.CgroupWrite.File) + } + } else { + seclog.Debugf("failed to resolve process of cgroup write event: %s", err) + } + return case model.UnshareMountNsEventType: if _, err = event.UnshareMountNS.UnmarshalBinary(data[offset:]); err != nil { seclog.Errorf("failed to decode unshare mnt ns event: %s (offset %d, len %d)", err, offset, dataLen) @@ -716,24 +748,6 @@ func (p *EBPFProbe) handleEvent(CPU int, data []byte) { } switch eventType { - case model.CgroupWriteEventType: - if _, err = event.CgroupWrite.UnmarshalBinary(data[offset:]); err != nil { - seclog.Errorf("failed to decode cgroup write released event: %s (offset %d, len %d)", err, offset, dataLen) - return - } - path, err := p.Resolvers.DentryResolver.Resolve(event.CgroupWrite.File.PathKey, true) - if err == nil && path != "" { - path = filepath.Dir(string(path)) - event.ProcessCacheEntry.CGroup.CGroupID = containerutils.CGroupID(path) - event.ProcessCacheEntry.Process.CGroup.CGroupID = containerutils.CGroupID(path) - containerID, cgroupFlags := containerutils.GetContainerFromCgroup(path) - event.ProcessCacheEntry.ContainerID = containerutils.ContainerID(containerID) - event.ProcessCacheEntry.Process.ContainerID = containerutils.ContainerID(containerID) - event.ProcessCacheEntry.CGroup.CGroupFlags = cgroupFlags - event.ProcessCacheEntry.Process.CGroup = event.ProcessCacheEntry.CGroup - } else { - seclog.Debugf("failed to resolve cgroup file %v", event.CgroupWrite.File) - } case model.FileMountEventType: if _, err = event.Mount.UnmarshalBinary(data[offset:]); err != nil { @@ -854,8 +868,9 @@ func (p *EBPFProbe) handleEvent(CPU int, data []byte) { // update mount pid mapping p.Resolvers.MountResolver.DelPid(event.Exit.Pid) - // update kill action reports + // update action reports p.processKiller.HandleProcessExited(event) + p.fileHasher.HandleProcessExited(event) case model.SetuidEventType: // the process context may be incorrect, do not modify it if event.Error != nil { @@ -999,7 +1014,8 @@ func (p *EBPFProbe) handleEvent(CPU int, data []byte) { } case model.IMDSEventType: if _, err = event.IMDS.UnmarshalBinary(data[offset:]); err != nil { - seclog.Errorf("failed to decode IMDS event: %s (offset %d, len %d)", 
err, offset, len(data)) + // it's very possible we can't parse the IMDS body, as such let's put it as debug for now + seclog.Debugf("failed to decode IMDS event: %s (offset %d, len %d)", err, offset, len(data)) return } defer p.Resolvers.ProcessResolver.UpdateAWSSecurityCredentials(event.PIDContext.Pid, event) @@ -1035,8 +1051,9 @@ func (p *EBPFProbe) handleEvent(CPU int, data []byte) { p.Resolvers.ProcessResolver.DeleteEntry(event.ProcessContext.Pid, event.ResolveEventTime()) } - // flush pending kill actions + // flush pending actions p.processKiller.FlushPendingReports() + p.fileHasher.FlushPendingReports() } // AddDiscarderPushedCallback add a callback to the list of func that have to be called when a discarder is pushed to kernel @@ -1101,16 +1118,20 @@ func (p *EBPFProbe) ApplyFilterPolicy(eventType eval.EventType, mode kfilters.Po return table.Put(ebpf.Uint32MapItem(et), policy) } -// SetApprovers applies approvers and removes the unused ones -func (p *EBPFProbe) SetApprovers(eventType eval.EventType, approvers rules.Approvers) error { +// setApprovers applies approvers and removes the unused ones +func (p *EBPFProbe) setApprovers(eventType eval.EventType, approvers rules.Approvers) error { kfiltersGetter, exists := kfilters.KFilterGetters[eventType] if !exists { return nil } - newKFilters, err := kfiltersGetter(approvers) + newKFilters, fieldHandled, err := kfiltersGetter(approvers) if err != nil { - seclog.Errorf("Error while adding approvers fallback in-kernel policy to `%s` for `%s`: %s", kfilters.PolicyModeAccept, eventType, err) + return err + } + + if len(approvers) != len(fieldHandled) { + return fmt.Errorf("all the approvers should be handled : %v vs %v", approvers, fieldHandled) } type tag struct { @@ -1125,7 +1146,7 @@ func (p *EBPFProbe) SetApprovers(eventType eval.EventType, approvers rules.Appro return err } - approverType := getApproverType(newKFilter.GetTableName()) + approverType := newKFilter.GetApproverType() approverAddedMetricCounter[tag{eventType, approverType}]++ } @@ -1137,7 +1158,7 @@ func (p *EBPFProbe) SetApprovers(eventType eval.EventType, approvers rules.Appro return err } - approverType := getApproverType(previousKFilter.GetTableName()) + approverType := previousKFilter.GetApproverType() approverAddedMetricCounter[tag{eventType, approverType}]-- if approverAddedMetricCounter[tag{eventType, approverType}] <= 0 { delete(approverAddedMetricCounter, tag{eventType, approverType}) @@ -1160,16 +1181,6 @@ func (p *EBPFProbe) SetApprovers(eventType eval.EventType, approvers rules.Appro return nil } -func getApproverType(tableName string) string { - approverType := "flag" - - if tableName == kfilters.BasenameApproverKernelMapName { - approverType = "basename" - } - - return approverType -} - func (p *EBPFProbe) isNeededForActivityDump(eventType eval.EventType) bool { if p.config.RuntimeSecurity.ActivityDumpEnabled { for _, e := range p.profileManagers.GetActivityDumpTracedEventTypes() { @@ -1206,17 +1217,17 @@ func (p *EBPFProbe) validEventTypeForConfig(eventType string) bool { // of the applied approvers for it. func (p *EBPFProbe) updateProbes(ruleEventTypes []eval.EventType, needRawSyscalls bool) error { // event types enabled either by event handlers or by rules - eventTypes := append([]eval.EventType{}, defaultEventTypes...) - eventTypes = append(eventTypes, ruleEventTypes...) + requestedEventTypes := append([]eval.EventType{}, defaultEventTypes...) + requestedEventTypes = append(requestedEventTypes, ruleEventTypes...) 
for eventType, handlers := range p.probe.eventHandlers { if len(handlers) == 0 { continue } - if slices.Contains(eventTypes, model.EventType(eventType).String()) { + if slices.Contains(requestedEventTypes, model.EventType(eventType).String()) { continue } if eventType != int(model.UnknownEventType) && eventType != int(model.MaxAllEventType) { - eventTypes = append(eventTypes, model.EventType(eventType).String()) + requestedEventTypes = append(requestedEventTypes, model.EventType(eventType).String()) } } @@ -1224,8 +1235,13 @@ func (p *EBPFProbe) updateProbes(ruleEventTypes []eval.EventType, needRawSyscall // extract probe to activate per the event types for eventType, selectors := range probes.GetSelectorsPerEventType(p.useFentry) { - if (eventType == "*" || slices.Contains(eventTypes, eventType) || p.isNeededForActivityDump(eventType) || p.isNeededForSecurityProfile(eventType) || p.config.Probe.EnableAllProbes) && p.validEventTypeForConfig(eventType) { + if (eventType == "*" || slices.Contains(requestedEventTypes, eventType) || p.isNeededForActivityDump(eventType) || p.isNeededForSecurityProfile(eventType) || p.config.Probe.EnableAllProbes) && p.validEventTypeForConfig(eventType) { activatedProbes = append(activatedProbes, selectors...) + + // to ensure the `enabled_events` map is correctly set with events that are enabled because of ADs + if !slices.Contains(requestedEventTypes, eventType) { + requestedEventTypes = append(requestedEventTypes, eventType) + } } } @@ -1290,7 +1306,7 @@ func (p *EBPFProbe) updateProbes(ruleEventTypes []eval.EventType, needRawSyscall } enabledEvents := uint64(0) - for _, eventName := range eventTypes { + for _, eventName := range requestedEventTypes { if eventName != "*" { eventType := config.ParseEvalEventType(eventName) if eventType == model.UnknownEventType { @@ -1422,6 +1438,9 @@ func (err QueuedNetworkDeviceError) Error() string { func (p *EBPFProbe) pushNewTCClassifierRequest(device model.NetDevice) { select { + case <-p.ctx.Done(): + // the probe is stopping, do not push the new tc classifier request + return case p.newTCNetDevices <- device: // do nothing default: @@ -1561,11 +1580,16 @@ func (p *EBPFProbe) ApplyRuleSet(rs *rules.RuleSet) (*kfilters.ApplyRuleSetRepor } for eventType, report := range ars.Policies { - if err := p.ApplyFilterPolicy(eventType, report.Mode); err != nil { - return nil, err - } - if err := p.SetApprovers(eventType, report.Approvers); err != nil { - return nil, err + if err := p.setApprovers(eventType, report.Approvers); err != nil { + seclog.Errorf("Error while adding approvers fallback in-kernel policy to `%s` for `%s`: %s", kfilters.PolicyModeAccept, eventType, err) + + if err := p.ApplyFilterPolicy(eventType, kfilters.PolicyModeAccept); err != nil { + return nil, err + } + } else { + if err := p.ApplyFilterPolicy(eventType, report.Mode); err != nil { + return nil, err + } } } @@ -1574,7 +1598,7 @@ func (p *EBPFProbe) ApplyRuleSet(rs *rules.RuleSet) (*kfilters.ApplyRuleSetRepor // activity dump & security profiles needRawSyscalls := p.isNeededForActivityDump(model.SyscallsEventType.String()) - p.processKiller.Reset() + p.processKiller.Apply(rs) // kill action if p.config.RuntimeSecurity.EnforcementEnabled && isKillActionPresent(rs) { @@ -1628,8 +1652,13 @@ func (p *EBPFProbe) DumpProcessCache(withArgs bool) (string, error) { return p.Resolvers.ProcessResolver.ToDot(withArgs) } +// EnableEnforcement sets the enforcement mode +func (p *EBPFProbe) EnableEnforcement(state bool) { + p.processKiller.SetState(state) +} + // 
NewEBPFProbe instantiates a new runtime security agent probe -func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts, wmeta workloadmeta.Component, telemetry telemetry.Component) (*EBPFProbe, error) { +func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts, telemetry telemetry.Component) (*EBPFProbe, error) { nerpc, err := erpc.NewERPC() if err != nil { return nil, err @@ -1885,11 +1914,13 @@ func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts, wmeta workload TTYFallbackEnabled: probe.Opts.TTYFallbackEnabled, } - p.Resolvers, err = resolvers.NewEBPFResolvers(config, p.Manager, probe.StatsdClient, probe.scrubber, p.Erpc, resolversOpts, wmeta, telemetry) + p.Resolvers, err = resolvers.NewEBPFResolvers(config, p.Manager, probe.StatsdClient, probe.scrubber, p.Erpc, resolversOpts, telemetry) if err != nil { return nil, err } + p.fileHasher = NewFileHasher(config, p.Resolvers.HashResolver) + hostname, err := utils.GetHostname() if err != nil || hostname == "" { hostname = "unknown" @@ -2165,14 +2196,17 @@ func (p *EBPFProbe) HandleActions(ctx *eval.Context, rule *rules.Rule) { return } - p.processKiller.KillAndReport(action.Def.Kill.Scope, action.Def.Kill.Signal, rule, ev, func(pid uint32, sig uint32) error { + if p.processKiller.KillAndReport(action.Def.Kill, rule, ev, func(pid uint32, sig uint32) error { if p.supportsBPFSendSignal { if err := p.killListMap.Put(uint32(pid), uint32(sig)); err != nil { seclog.Warnf("failed to kill process with eBPF %d: %s", pid, err) } } return p.processKiller.KillFromUserspace(pid, sig, ev) - }) + }) { + p.probe.onRuleActionPerformed(rule, action.Def) + } + case action.Def.CoreDump != nil: if p.config.RuntimeSecurity.InternalMonitoringEnabled { dump := NewCoreDump(action.Def.CoreDump, p.Resolvers, serializers.NewEventSerializer(ev, nil)) @@ -2180,10 +2214,12 @@ func (p *EBPFProbe) HandleActions(ctx *eval.Context, rule *rules.Rule) { event := events.NewCustomEvent(model.UnknownEventType, dump) p.probe.DispatchCustomEvent(rule, event) + p.probe.onRuleActionPerformed(rule, action.Def) } case action.Def.Hash != nil: - // force the resolution as it will force the hash resolution as well - ev.ResolveFields() + if p.fileHasher.HashAndReport(rule, ev) { + p.probe.onRuleActionPerformed(rule, action.Def) + } } } } diff --git a/pkg/security/probe/probe_ebpfless.go b/pkg/security/probe/probe_ebpfless.go index 482710eaea3ca..8cdd164cb3655 100644 --- a/pkg/security/probe/probe_ebpfless.go +++ b/pkg/security/probe/probe_ebpfless.go @@ -77,8 +77,13 @@ type EBPFLessProbe struct { cancelFnc context.CancelFunc fieldHandlers *EBPFLessFieldHandlers clients map[net.Conn]*client - processKiller *ProcessKiller wg sync.WaitGroup + + // kill action + processKiller *ProcessKiller + + // hash action + fileHasher *FileHasher } // GetProfileManager returns the Profile Managers @@ -325,14 +330,16 @@ func (p *EBPFLessProbe) handleSyscallMsg(cl *client, syscallMsg *ebpfless.Syscal event.Exit.Code = syscallMsg.Exit.Code defer p.Resolvers.ProcessResolver.DeleteEntry(process.CacheResolverKey{Pid: syscallMsg.PID, NSID: cl.nsID}, event.ProcessContext.ExitTime) - // update kill action reports + // update action reports p.processKiller.HandleProcessExited(event) + p.fileHasher.HandleProcessExited(event) } p.DispatchEvent(event) - // flush pending kill actions + // flush pending actions p.processKiller.FlushPendingReports() + p.fileHasher.FlushPendingReports() } // DispatchEvent sends an event to the probe event handler @@ -558,6 +565,7 @@ func (p *EBPFLessProbe) 
NewModel() *model.Model { // SendStats send the stats func (p *EBPFLessProbe) SendStats() error { + p.processKiller.SendStats(p.statsdClient) return nil } @@ -572,8 +580,8 @@ func (p *EBPFLessProbe) FlushDiscarders() error { } // ApplyRuleSet applies the new ruleset -func (p *EBPFLessProbe) ApplyRuleSet(_ *rules.RuleSet) (*kfilters.ApplyRuleSetReport, error) { - p.processKiller.Reset() +func (p *EBPFLessProbe) ApplyRuleSet(rs *rules.RuleSet) (*kfilters.ApplyRuleSetReport, error) { + p.processKiller.Apply(rs) return &kfilters.ApplyRuleSetReport{}, nil } @@ -593,12 +601,15 @@ func (p *EBPFLessProbe) HandleActions(ctx *eval.Context, rule *rules.Rule) { return } - p.processKiller.KillAndReport(action.Def.Kill.Scope, action.Def.Kill.Signal, rule, ev, func(pid uint32, sig uint32) error { + if p.processKiller.KillAndReport(action.Def.Kill, rule, ev, func(pid uint32, sig uint32) error { return p.processKiller.KillFromUserspace(pid, sig, ev) - }) + }) { + p.probe.onRuleActionPerformed(rule, action.Def) + } case action.Def.Hash != nil: - // force the resolution as it will force the hash resolution as well - ev.ResolveFields() + if p.fileHasher.HashAndReport(rule, ev) { + p.probe.onRuleActionPerformed(rule, action.Def) + } } } } @@ -633,6 +644,11 @@ func (p *EBPFLessProbe) zeroEvent() *model.Event { return p.event } +// EnableEnforcement sets the enforcement mode +func (p *EBPFLessProbe) EnableEnforcement(state bool) { + p.processKiller.SetState(state) +} + // NewEBPFLessProbe returns a new eBPF less probe func NewEBPFLessProbe(probe *Probe, config *config.Config, opts Opts, telemetry telemetry.Component) (*EBPFLessProbe, error) { opts.normalize() @@ -667,6 +683,8 @@ func NewEBPFLessProbe(probe *Probe, config *config.Config, opts Opts, telemetry return nil, err } + p.fileHasher = NewFileHasher(config, p.Resolvers.HashResolver) + hostname, err := utils.GetHostname() if err != nil || hostname == "" { hostname = "unknown" @@ -681,3 +699,8 @@ func NewEBPFLessProbe(probe *Probe, config *config.Config, opts Opts, telemetry return p, nil } + +// PlaySnapshot plays a snapshot +func (p *EBPFLessProbe) PlaySnapshot() { + // TODO: Implement this method if needed. 
+} diff --git a/pkg/security/probe/probe_linux.go b/pkg/security/probe/probe_linux.go index 173560f8dc302..020e7977dae4e 100644 --- a/pkg/security/probe/probe_linux.go +++ b/pkg/security/probe/probe_linux.go @@ -8,7 +8,6 @@ package probe import ( "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/security/config" ) @@ -20,7 +19,7 @@ const ( ) // NewProbe instantiates a new runtime security agent probe -func NewProbe(config *config.Config, opts Opts, wmeta workloadmeta.Component, telemetry telemetry.Component) (*Probe, error) { +func NewProbe(config *config.Config, opts Opts, telemetry telemetry.Component) (*Probe, error) { opts.normalize() p := newProbe(config, opts) @@ -32,7 +31,7 @@ func NewProbe(config *config.Config, opts Opts, wmeta workloadmeta.Component, te } p.PlatformProbe = pp } else { - pp, err := NewEBPFProbe(p, config, opts, wmeta, telemetry) + pp, err := NewEBPFProbe(p, config, opts, telemetry) if err != nil { return nil, err } diff --git a/pkg/security/probe/probe_others.go b/pkg/security/probe/probe_others.go index 4e37810f70d5a..5e12a021aa871 100644 --- a/pkg/security/probe/probe_others.go +++ b/pkg/security/probe/probe_others.go @@ -39,6 +39,7 @@ type PlatformProbe struct { // Probe represents the runtime security probe type Probe struct { Config *config.Config + Opts Opts } // Origin returns origin @@ -107,3 +108,11 @@ func (p *Probe) RefreshUserCache(_ string) error { // HandleActions executes the actions of a triggered rule func (p *Probe) HandleActions(_ *rules.Rule, _ eval.Event) {} + +// EnableEnforcement sets the enforcement mode +func (p *Probe) EnableEnforcement(_ bool) {} + +// PlaySnapshot plays the snapshot +func (p *Probe) PlaySnapshot() { + // TODO: Implement this method if needed. 
+} diff --git a/pkg/security/probe/probe_windows.go b/pkg/security/probe/probe_windows.go index 0bff0416519fe..73b8db8c0e529 100644 --- a/pkg/security/probe/probe_windows.go +++ b/pkg/security/probe/probe_windows.go @@ -20,7 +20,6 @@ import ( lru "github.com/hashicorp/golang-lru/v2" "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/etw" etwimpl "github.com/DataDog/datadog-agent/comp/etw/impl" "github.com/DataDog/datadog-agent/pkg/security/config" @@ -799,7 +798,7 @@ func (p *WindowsProbe) handleProcessStart(ev *model.Event, start *procmon.Proces } - pce, err := p.Resolvers.ProcessResolver.AddNewEntry(pid, uint32(start.PPid), start.ImageFile, start.CmdLine, start.OwnerSidString) + pce, err := p.Resolvers.ProcessResolver.AddNewEntry(pid, uint32(start.PPid), start.ImageFile, start.EnvBlock, start.CmdLine, start.OwnerSidString) if err != nil { log.Errorf("error in resolver %v", err) return false @@ -1111,6 +1110,9 @@ func (p *WindowsProbe) SendStats() error { if err != nil { return err } + + p.processKiller.SendStats(p.statsdClient) + return nil } @@ -1257,7 +1259,7 @@ func (p *WindowsProbe) ApplyRuleSet(rs *rules.RuleSet) (*kfilters.ApplyRuleSetRe } } - p.processKiller.Reset() + p.processKiller.Apply(rs) ars, err := kfilters.NewApplyRuleSetReport(p.config.Probe, rs) if err != nil { @@ -1360,9 +1362,11 @@ func (p *WindowsProbe) HandleActions(ctx *eval.Context, rule *rules.Rule) { return } - p.processKiller.KillAndReport(action.Def.Kill.Scope, action.Def.Kill.Signal, rule, ev, func(pid uint32, sig uint32) error { + if p.processKiller.KillAndReport(action.Def.Kill, rule, ev, func(pid uint32, sig uint32) error { return p.processKiller.KillFromUserspace(pid, sig, ev) - }) + }) { + p.probe.onRuleActionPerformed(rule, action.Def) + } } } } @@ -1386,8 +1390,13 @@ func (p *Probe) Origin() string { return "" } +// EnableEnforcement sets the enforcement mode +func (p *WindowsProbe) EnableEnforcement(state bool) { + p.processKiller.SetState(state) +} + // NewProbe instantiates a new runtime security agent probe -func NewProbe(config *config.Config, opts Opts, _ workloadmeta.Component, telemetry telemetry.Component) (*Probe, error) { +func NewProbe(config *config.Config, opts Opts, telemetry telemetry.Component) (*Probe, error) { opts.normalize() p := newProbe(config, opts) @@ -1423,3 +1432,8 @@ func (p *WindowsProbe) SetApprovers(_ eval.EventType, approvers rules.Approvers) return nil } + +// PlaySnapshot plays a snapshot +func (p *WindowsProbe) PlaySnapshot() { + // TODO: Implement this method if needed. 
+} diff --git a/pkg/security/probe/process_killer.go b/pkg/security/probe/process_killer.go index 9af3dd54c1975..ab0c9c5333000 100644 --- a/pkg/security/probe/process_killer.go +++ b/pkg/security/probe/process_killer.go @@ -10,13 +10,18 @@ package probe import ( "context" + "fmt" "slices" "sync" "time" "github.com/jellydator/ttlcache/v3" + "go.uber.org/atomic" + + "github.com/DataDog/datadog-go/v5/statsd" "github.com/DataDog/datadog-agent/pkg/security/config" + "github.com/DataDog/datadog-agent/pkg/security/metrics" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" @@ -27,7 +32,7 @@ import ( const ( defaultKillActionFlushDelay = 2 * time.Second - dismarmerCacheFlushInterval = 5 * time.Second + disarmerCacheFlushInterval = 5 * time.Second ) // ProcessKiller defines a process killer structure @@ -36,20 +41,34 @@ type ProcessKiller struct { cfg *config.Config + enabled bool pendingReports []*KillActionReport binariesExcluded []*eval.Glob sourceAllowed []string + useDisarmers *atomic.Bool + disarmerStateCh chan disarmerState ruleDisarmersLock sync.Mutex - ruleDisarmers map[rules.RuleID]*killDisarmer + ruleDisarmers map[rules.RuleID]*ruleDisarmer + + perRuleStatsLock sync.Mutex + perRuleStats map[rules.RuleID]*processKillerStats +} + +type processKillerStats struct { + processesKilled int64 } // NewProcessKiller returns a new ProcessKiller func NewProcessKiller(cfg *config.Config) (*ProcessKiller, error) { p := &ProcessKiller{ - cfg: cfg, - ruleDisarmers: make(map[rules.RuleID]*killDisarmer), - sourceAllowed: cfg.RuntimeSecurity.EnforcementRuleSourceAllowed, + cfg: cfg, + enabled: true, + useDisarmers: atomic.NewBool(false), + disarmerStateCh: make(chan disarmerState, 1), + ruleDisarmers: make(map[rules.RuleID]*ruleDisarmer), + sourceAllowed: cfg.RuntimeSecurity.EnforcementRuleSourceAllowed, + perRuleStats: make(map[rules.RuleID]*processKillerStats), } binaries := append(binariesExcluded, cfg.RuntimeSecurity.EnforcementBinaryExcluded...) 
@@ -66,6 +85,14 @@ func NewProcessKiller(cfg *config.Config) (*ProcessKiller, error) { return p, nil } +// SetState sets the state - enabled or disabled - for the process killer +func (p *ProcessKiller) SetState(enabled bool) { + p.Lock() + defer p.Unlock() + + p.enabled = enabled +} + // AddPendingReports add a pending reports func (p *ProcessKiller) AddPendingReports(report *KillActionReport) { p.Lock() @@ -109,166 +136,330 @@ func (p *ProcessKiller) HandleProcessExited(event *model.Event) { }) } -func (p *ProcessKiller) isKillAllowed(pids []uint32, paths []string) bool { +func (p *ProcessKiller) isKillAllowed(pids []uint32, paths []string) (bool, error) { + p.Lock() + if !p.enabled { + p.Unlock() + return false, fmt.Errorf("the enforcement capability is disabled") + } + p.Unlock() + for i, pid := range pids { if pid <= 1 || pid == utils.Getpid() { - return false + return false, fmt.Errorf("process with pid %d cannot be killed", pid) } if slices.ContainsFunc(p.binariesExcluded, func(glob *eval.Glob) bool { return glob.Matches(paths[i]) }) { - return false + return false, fmt.Errorf("process `%s`(%d) is protected", paths[i], pid) } } - return true + return true, nil } func (p *ProcessKiller) isRuleAllowed(rule *rules.Rule) bool { return slices.Contains(p.sourceAllowed, rule.Policy.Source) } -// KillAndReport kill and report -func (p *ProcessKiller) KillAndReport(scope string, signal string, rule *rules.Rule, ev *model.Event, killFnc func(pid uint32, sig uint32) error) { +// KillAndReport kill and report, returns true if we did try to kill +func (p *ProcessKiller) KillAndReport(kill *rules.KillDefinition, rule *rules.Rule, ev *model.Event, killFnc func(pid uint32, sig uint32) error) bool { if !p.isRuleAllowed(rule) { log.Warnf("unable to kill, the source is not allowed: %v", rule) - return + return false } entry, exists := ev.ResolveProcessCacheEntry() if !exists { - return + return false } - rsConfig := p.cfg.RuntimeSecurity - - if rsConfig.EnforcementDisarmerContainerEnabled || rsConfig.EnforcementDisarmerExecutableEnabled { - var dismarmer *killDisarmer + if p.useDisarmers.Load() { + var disarmer *ruleDisarmer p.ruleDisarmersLock.Lock() - if dismarmer = p.ruleDisarmers[rule.ID]; dismarmer == nil { - dismarmer = newKillDisarmer(rsConfig, rule.ID) - p.ruleDisarmers[rule.ID] = dismarmer + if disarmer = p.ruleDisarmers[rule.ID]; disarmer == nil { + containerParams, executableParams := p.getDisarmerParams(kill) + disarmer = newRuleDisarmer(containerParams, executableParams) + p.ruleDisarmers[rule.ID] = disarmer } p.ruleDisarmersLock.Unlock() - if rsConfig.EnforcementDisarmerContainerEnabled { + if disarmer.container.enabled { if containerID := ev.FieldHandlers.ResolveContainerID(ev, ev.ContainerContext); containerID != "" { - if !dismarmer.allow(dismarmer.containerCache, containerID, func() { - seclog.Warnf("disarming kill action of rule `%s` because more than %d different containers triggered it in the last %s", rule.ID, dismarmer.containerCache.capacity, rsConfig.EnforcementDisarmerContainerPeriod) + if !disarmer.allow(disarmer.containerCache, containerID, func() { + disarmer.disarmedCount[containerDisarmerType]++ + seclog.Warnf("disarming kill action of rule `%s` because more than %d different containers triggered it in the last %s", rule.ID, disarmer.container.capacity, disarmer.container.period) }) { seclog.Warnf("skipping kill action of rule `%s` because it has been disarmed", rule.ID) - return + return false } } } - if rsConfig.EnforcementDisarmerExecutableEnabled { + if 
disarmer.executable.enabled { executable := entry.Process.FileEvent.PathnameStr - if !dismarmer.allow(dismarmer.executableCache, executable, func() { - seclog.Warnf("disarmed kill action of rule `%s` because more than %d different executables triggered it in the last %s", rule.ID, dismarmer.executableCache.capacity, rsConfig.EnforcementDisarmerExecutablePeriod) + if !disarmer.allow(disarmer.executableCache, executable, func() { + disarmer.disarmedCount[executableDisarmerType]++ + seclog.Warnf("disarmed kill action of rule `%s` because more than %d different executables triggered it in the last %s", rule.ID, disarmer.executable.capacity, disarmer.executable.period) }) { seclog.Warnf("skipping kill action of rule `%s` because it has been disarmed", rule.ID) - return + return false } } } - switch scope { + scope := "process" + switch kill.Scope { case "container", "process": - default: - scope = "process" + scope = kill.Scope } pids, paths, err := p.getProcesses(scope, ev, entry) if err != nil { log.Errorf("unable to kill: %s", err) - return + return false } // if one pids is not allowed don't kill anything - if !p.isKillAllowed(pids, paths) { - log.Warnf("unable to kill, some processes are protected: %v, %v", pids, paths) - return + if killAllowed, err := p.isKillAllowed(pids, paths); !killAllowed { + log.Warnf("unable to kill: %v", err) + return false } - sig := model.SignalConstants[signal] + sig := model.SignalConstants[kill.Signal] + var processesKilled int64 killedAt := time.Now() for _, pid := range pids { - log.Debugf("requesting signal %s to be sent to %d", signal, pid) + log.Debugf("requesting signal %s to be sent to %d", kill.Signal, pid) if err := killFnc(uint32(pid), uint32(sig)); err != nil { seclog.Debugf("failed to kill process %d: %s", pid, err) + } else { + processesKilled++ } } + p.perRuleStatsLock.Lock() + var stats *processKillerStats + if stats = p.perRuleStats[rule.ID]; stats == nil { + stats = &processKillerStats{} + p.perRuleStats[rule.ID] = stats + } + stats.processesKilled += processesKilled + p.perRuleStatsLock.Unlock() + p.Lock() defer p.Unlock() report := &KillActionReport{ Scope: scope, - Signal: signal, + Signal: kill.Signal, Pid: ev.ProcessContext.Pid, CreatedAt: ev.ProcessContext.ExecTime, DetectedAt: ev.ResolveEventTime(), KilledAt: killedAt, - Rule: rule, + rule: rule, } ev.ActionReports = append(ev.ActionReports, report) p.pendingReports = append(p.pendingReports, report) + + return true } -// Reset resets the disarmer state -func (p *ProcessKiller) Reset() { +// Apply applies to ruleset to the process killer +func (p *ProcessKiller) Apply(rs *rules.RuleSet) { + if p.cfg.RuntimeSecurity.EnforcementEnabled { + var ruleSetHasKillAction bool + var rulesetHasKillDisarmer bool + + rules: + for _, rule := range rs.GetRules() { + if !p.isRuleAllowed(rule) { + continue + } + for _, action := range rule.Actions { + if action.Def.Kill == nil { + continue + } + ruleSetHasKillAction = true + if action.Def.Kill.Disarmer != nil && (action.Def.Kill.Disarmer.Container != nil || action.Def.Kill.Disarmer.Executable != nil) { + rulesetHasKillDisarmer = true + break rules + } + } + } + + configHasKillDisarmer := p.cfg.RuntimeSecurity.EnforcementDisarmerContainerEnabled || p.cfg.RuntimeSecurity.EnforcementDisarmerExecutableEnabled + if ruleSetHasKillAction && (configHasKillDisarmer || rulesetHasKillDisarmer) { + p.useDisarmers.Store(true) + p.disarmerStateCh <- running + } else { + p.useDisarmers.Store(false) + p.disarmerStateCh <- stopped + } + } + + p.perRuleStatsLock.Lock() 
+ clear(p.perRuleStats) + p.perRuleStatsLock.Unlock() p.ruleDisarmersLock.Lock() clear(p.ruleDisarmers) p.ruleDisarmersLock.Unlock() } +// SendStats sends runtime security enforcement statistics to Datadog +func (p *ProcessKiller) SendStats(statsd statsd.ClientInterface) { + p.perRuleStatsLock.Lock() + for ruleID, stats := range p.perRuleStats { + ruleIDTag := []string{ + "rule_id:" + string(ruleID), + } + + if stats.processesKilled > 0 { + _ = statsd.Count(metrics.MetricEnforcementProcessKilled, stats.processesKilled, ruleIDTag, 1) + stats.processesKilled = 0 + } + } + p.perRuleStatsLock.Unlock() + + p.ruleDisarmersLock.Lock() + for ruleID, disarmer := range p.ruleDisarmers { + ruleIDTag := []string{ + "rule_id:" + string(ruleID), + } + + disarmer.Lock() + for disarmerType, count := range disarmer.disarmedCount { + if count > 0 { + tags := append([]string{"disarmer_type:" + string(disarmerType)}, ruleIDTag...) + _ = statsd.Count(metrics.MetricEnforcementRuleDisarmed, count, tags, 1) + disarmer.disarmedCount[disarmerType] = 0 + } + } + if disarmer.rearmedCount > 0 { + _ = statsd.Count(metrics.MetricEnforcementRuleRearmed, disarmer.rearmedCount, ruleIDTag, 1) + disarmer.rearmedCount = 0 + } + disarmer.Unlock() + } + p.ruleDisarmersLock.Unlock() +} + // Start starts the go rountine responsible for flushing the disarmer caches func (p *ProcessKiller) Start(ctx context.Context, wg *sync.WaitGroup) { - if !p.cfg.RuntimeSecurity.EnforcementEnabled || (!p.cfg.RuntimeSecurity.EnforcementDisarmerContainerEnabled && !p.cfg.RuntimeSecurity.EnforcementDisarmerExecutableEnabled) { + if !p.cfg.RuntimeSecurity.EnforcementEnabled { return } wg.Add(1) go func() { defer wg.Done() - ticker := time.NewTicker(dismarmerCacheFlushInterval) + ticker := time.NewTicker(disarmerCacheFlushInterval) defer ticker.Stop() + state := stopped for { - select { - case <-ticker.C: - p.ruleDisarmersLock.Lock() - for _, disarmer := range p.ruleDisarmers { - disarmer.Lock() - var cLength, eLength int - if disarmer.containerCache != nil { - cLength = disarmer.containerCache.flush() + switch state { + case stopped: + select { + case state = <-p.disarmerStateCh: + if state == running { + ticker.Reset(disarmerCacheFlushInterval) } - if disarmer.executableCache != nil { - eLength = disarmer.executableCache.flush() + break + case <-ctx.Done(): + return + } + case running: + select { + case state = <-p.disarmerStateCh: + if state == stopped { + ticker.Stop() } - if disarmer.disarmed && cLength == 0 && eLength == 0 { - disarmer.disarmed = false - seclog.Infof("kill action of rule `%s` has been re-armed", disarmer.ruleID) + break + case <-ctx.Done(): + return + case <-ticker.C: + p.ruleDisarmersLock.Lock() + for ruleID, disarmer := range p.ruleDisarmers { + disarmer.Lock() + var cLength, eLength int + if disarmer.container.enabled { + cLength = disarmer.containerCache.flush() + } + if disarmer.executable.enabled { + eLength = disarmer.executableCache.flush() + } + if disarmer.disarmed && cLength == 0 && eLength == 0 { + disarmer.disarmed = false + disarmer.rearmedCount++ + seclog.Infof("kill action of rule `%s` has been re-armed", ruleID) + } + disarmer.Unlock() } - disarmer.Unlock() + p.ruleDisarmersLock.Unlock() } - p.ruleDisarmersLock.Unlock() - case <-ctx.Done(): - return } } }() } -type killDisarmer struct { +func (p *ProcessKiller) getDisarmerParams(kill *rules.KillDefinition) (*disarmerParams, *disarmerParams) { + var containerParams, executableParams disarmerParams + + if kill.Disarmer != nil && kill.Disarmer.Container != nil && 
kill.Disarmer.Container.MaxAllowed > 0 { + containerParams.enabled = true + containerParams.capacity = uint64(kill.Disarmer.Container.MaxAllowed) + containerParams.period = kill.Disarmer.Container.Period + } else if p.cfg.RuntimeSecurity.EnforcementDisarmerContainerEnabled { + containerParams.enabled = true + containerParams.capacity = uint64(p.cfg.RuntimeSecurity.EnforcementDisarmerContainerMaxAllowed) + containerParams.period = p.cfg.RuntimeSecurity.EnforcementDisarmerContainerPeriod + } + + if kill.Disarmer != nil && kill.Disarmer.Executable != nil && kill.Disarmer.Executable.MaxAllowed > 0 { + executableParams.enabled = true + executableParams.capacity = uint64(kill.Disarmer.Executable.MaxAllowed) + executableParams.period = kill.Disarmer.Executable.Period + } else if p.cfg.RuntimeSecurity.EnforcementDisarmerExecutableEnabled { + executableParams.enabled = true + executableParams.capacity = uint64(p.cfg.RuntimeSecurity.EnforcementDisarmerExecutableMaxAllowed) + executableParams.period = p.cfg.RuntimeSecurity.EnforcementDisarmerExecutablePeriod + } + + return &containerParams, &executableParams +} + +type disarmerState int + +const ( + stopped disarmerState = iota + running +) + +type disarmerType string + +const ( + containerDisarmerType = disarmerType("container") + executableDisarmerType = disarmerType("executable") +) + +type ruleDisarmer struct { sync.Mutex disarmed bool - ruleID rules.RuleID + container disarmerParams containerCache *disarmerCache[string, bool] + executable disarmerParams executableCache *disarmerCache[string, bool] + // stats + disarmedCount map[disarmerType]int64 + rearmedCount int64 +} + +type disarmerParams struct { + enabled bool + capacity uint64 + period time.Duration } type disarmerCache[K comparable, V any] struct { @@ -276,18 +467,18 @@ type disarmerCache[K comparable, V any] struct { capacity uint64 } -func newDisarmerCache[K comparable, V any](capacity uint64, period time.Duration) *disarmerCache[K, V] { +func newDisarmerCache[K comparable, V any](params *disarmerParams) *disarmerCache[K, V] { cacheOpts := []ttlcache.Option[K, V]{ - ttlcache.WithCapacity[K, V](capacity), + ttlcache.WithCapacity[K, V](params.capacity), } - if period > 0 { - cacheOpts = append(cacheOpts, ttlcache.WithTTL[K, V](period)) + if params.period > 0 { + cacheOpts = append(cacheOpts, ttlcache.WithTTL[K, V](params.period)) } return &disarmerCache[K, V]{ - Cache: ttlcache.New[K, V](cacheOpts...), - capacity: capacity, + Cache: ttlcache.New(cacheOpts...), + capacity: params.capacity, } } @@ -296,30 +487,28 @@ func (c *disarmerCache[K, V]) flush() int { return c.Len() } -func newKillDisarmer(cfg *config.RuntimeSecurityConfig, ruleID rules.RuleID) *killDisarmer { - kd := &killDisarmer{ - disarmed: false, - ruleID: ruleID, +func newRuleDisarmer(containerParams *disarmerParams, executableParams *disarmerParams) *ruleDisarmer { + kd := &ruleDisarmer{ + disarmed: false, + container: *containerParams, + executable: *executableParams, + disarmedCount: make(map[disarmerType]int64), } - if cfg.EnforcementDisarmerContainerEnabled { - kd.containerCache = newDisarmerCache[string, bool](uint64(cfg.EnforcementDisarmerContainerMaxAllowed), cfg.EnforcementDisarmerContainerPeriod) + if kd.container.enabled { + kd.containerCache = newDisarmerCache[string, bool](containerParams) } - if cfg.EnforcementDisarmerExecutableEnabled { - kd.executableCache = newDisarmerCache[string, bool](uint64(cfg.EnforcementDisarmerExecutableMaxAllowed), cfg.EnforcementDisarmerExecutablePeriod) + if kd.executable.enabled { 
+ kd.executableCache = newDisarmerCache[string, bool](executableParams) } return kd } -func (kd *killDisarmer) allow(cache *disarmerCache[string, bool], key string, onDisarm func()) bool { - kd.Lock() - defer kd.Unlock() - - if kd.disarmed { - return false - } +func (rd *ruleDisarmer) allow(cache *disarmerCache[string, bool], key string, onDisarm func()) bool { + rd.Lock() + defer rd.Unlock() if cache == nil { return true @@ -331,11 +520,11 @@ func (kd *killDisarmer) allow(cache *disarmerCache[string, bool], key string, on if cache.Get(key) == nil { alreadyAtCapacity := uint64(cache.Len()) >= cache.capacity cache.Set(key, true, ttlcache.DefaultTTL) - if alreadyAtCapacity && !kd.disarmed { - kd.disarmed = true + if alreadyAtCapacity && !rd.disarmed { + rd.disarmed = true onDisarm() } } - return !kd.disarmed + return !rd.disarmed } diff --git a/pkg/security/probe/process_killer_test.go b/pkg/security/probe/process_killer_test.go index d9cb203df81cd..091b8b19d2592 100644 --- a/pkg/security/probe/process_killer_test.go +++ b/pkg/security/probe/process_killer_test.go @@ -29,13 +29,25 @@ func TestProcessKillerExclusion(t *testing.T) { }, }, ) - assert.Nil(t, err) - assert.True(t, p.isKillAllowed([]uint32{utils.Getpid() + 1}, []string{"/usr/bin/date"})) - assert.False(t, p.isKillAllowed([]uint32{utils.Getpid() + 1}, []string{"/usr/bin/dd"})) - assert.False(t, p.isKillAllowed([]uint32{utils.Getpid() + 1}, []string{"/usr/sbin/sudo"})) - assert.False(t, p.isKillAllowed([]uint32{utils.Getpid()}, []string{"/usr/bin/date"})) - assert.False(t, p.isKillAllowed([]uint32{1}, []string{"/usr/bin/date"})) - assert.False(t, p.isKillAllowed([]uint32{utils.Getpid() + 1}, []string{"/opt/datadog-agent/bin/agent/agent"})) - assert.False(t, p.isKillAllowed([]uint32{utils.Getpid() + 1}, []string{"/opt/datadog-packages/datadog-agent/v1.0.0/bin/agent/agent"})) + + pid := utils.Getpid() + tests := []struct { + pids []uint32 + paths []string + expectedResult bool + }{ + {[]uint32{pid + 1}, []string{"/usr/bin/date"}, true}, + {[]uint32{pid + 1}, []string{"/usr/bin/dd"}, false}, + {[]uint32{pid + 1}, []string{"/usr/sbin/sudo"}, false}, + {[]uint32{pid}, []string{"/usr/bin/date"}, false}, + {[]uint32{1}, []string{"/usr/bin/date"}, false}, + {[]uint32{pid + 1}, []string{"/opt/datadog-agent/bin/agent/agent"}, false}, + {[]uint32{pid + 1}, []string{"/opt/datadog-packages/datadog-agent/v1.0.0/bin/agent/agent"}, false}, + } + + for _, test := range tests { + isKilledAllowed, _ := p.isKillAllowed(test.pids, test.paths) + assert.Equal(t, test.expectedResult, isKilledAllowed) + } } diff --git a/pkg/security/probe/security_profile.go b/pkg/security/probe/security_profile.go index 610497a48c734..9f281a524bffb 100644 --- a/pkg/security/probe/security_profile.go +++ b/pkg/security/probe/security_profile.go @@ -99,6 +99,16 @@ func (spm *SecurityProfileManagers) AddActivityDumpHandler(handler dump.Activity } } +// DumpActivity handles an activity dump request +func (spm *SecurityProfileManagers) DumpActivity(params *api.ActivityDumpParams) (*api.ActivityDumpMessage, error) { + if spm.activityDumpManager == nil { + return &api.ActivityDumpMessage{ + Error: ErrActivityDumpManagerDisabled.Error(), + }, ErrActivityDumpManagerDisabled + } + return spm.activityDumpManager.DumpActivity(params) +} + // ListActivityDumps returns the list of active dumps func (spm *SecurityProfileManagers) ListActivityDumps(params *api.ActivityDumpListParams) (*api.ActivityDumpListMessage, error) { if spm.activityDumpManager == nil { diff --git 
a/pkg/security/rconfig/policies.go b/pkg/security/rconfig/policies.go index c9bed099acaff..e7fe1ea3129ca 100644 --- a/pkg/security/rconfig/policies.go +++ b/pkg/security/rconfig/policies.go @@ -18,8 +18,8 @@ import ( "github.com/skydive-project/go-debouncer" "github.com/DataDog/datadog-agent/pkg/api/security" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/remote/client" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/security/utils" @@ -41,35 +41,37 @@ type RCPolicyProvider struct { lastCustoms map[string]state.RawConfig debouncer *debouncer.Debouncer dumpPolicies bool + setEnforcementCb func(bool) } var _ rules.PolicyProvider = (*RCPolicyProvider)(nil) // NewRCPolicyProvider returns a new Remote Config based policy provider -func NewRCPolicyProvider(dumpPolicies bool) (*RCPolicyProvider, error) { +func NewRCPolicyProvider(dumpPolicies bool, setEnforcementCallback func(bool)) (*RCPolicyProvider, error) { agentVersion, err := utils.GetAgentSemverVersion() if err != nil { return nil, fmt.Errorf("failed to parse agent version: %w", err) } - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, fmt.Errorf("failed to get ipc address: %w", err) } - c, err := client.NewGRPCClient(ipcAddress, config.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(config.Datadog()) }, + c, err := client.NewGRPCClient(ipcAddress, pkgconfigsetup.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(pkgconfigsetup.Datadog()) }, client.WithAgent(agentName, agentVersion.String()), client.WithProducts(state.ProductCWSDD, state.ProductCWSCustom), client.WithPollInterval(securityAgentRCPollInterval), - client.WithDirectorRootOverride(config.Datadog().GetString("site"), config.Datadog().GetString("remote_configuration.director_root")), + client.WithDirectorRootOverride(pkgconfigsetup.Datadog().GetString("site"), pkgconfigsetup.Datadog().GetString("remote_configuration.director_root")), ) if err != nil { return nil, err } r := &RCPolicyProvider{ - client: c, - dumpPolicies: dumpPolicies, + client: c, + dumpPolicies: dumpPolicies, + setEnforcementCb: setEnforcementCallback, } r.debouncer = debouncer.New(debounceDelay, r.onNewPoliciesReady) @@ -82,12 +84,18 @@ func (r *RCPolicyProvider) Start() { r.debouncer.Start() - r.client.Subscribe(state.ProductCWSDD, r.rcDefaultsUpdateCallback) - r.client.Subscribe(state.ProductCWSCustom, r.rcCustomsUpdateCallback) + r.client.SubscribeAll(state.ProductCWSDD, client.NewListener(r.rcDefaultsUpdateCallback, r.rcStateChanged)) + r.client.SubscribeAll(state.ProductCWSCustom, client.NewListener(r.rcCustomsUpdateCallback, r.rcStateChanged)) r.client.Start() } +func (r *RCPolicyProvider) rcStateChanged(state bool) { + if r.setEnforcementCb != nil { + r.setEnforcementCb(state) + } +} + func (r *RCPolicyProvider) rcDefaultsUpdateCallback(configs map[string]state.RawConfig, _ func(string, state.ApplyStatus)) { r.Lock() if len(r.lastDefaults) == 0 && len(configs) == 0 { @@ -191,6 +199,10 @@ func (r *RCPolicyProvider) onNewPoliciesReady() { defer r.RUnlock() if r.onNewPoliciesReadyCb != nil { + if r.setEnforcementCb != nil { + r.setEnforcementCb(true) + } + r.onNewPoliciesReadyCb() } } diff --git a/pkg/security/rconfig/profiles.go 
b/pkg/security/rconfig/profiles.go deleted file mode 100644 index 5441d8621ed5b..0000000000000 --- a/pkg/security/rconfig/profiles.go +++ /dev/null @@ -1,150 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build linux - -// Package rconfig holds rconfig related files -package rconfig - -import ( - "context" - "encoding/json" - "fmt" - "sync" - - proto "github.com/DataDog/agent-payload/v5/cws/dumpsv1" - "github.com/DataDog/datadog-go/v5/statsd" - - "github.com/DataDog/datadog-agent/pkg/api/security" - "github.com/DataDog/datadog-agent/pkg/config" - "github.com/DataDog/datadog-agent/pkg/config/remote/client" - "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" - cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" - "github.com/DataDog/datadog-agent/pkg/security/utils" - "github.com/DataDog/datadog-agent/pkg/util/log" -) - -const ( - // image name/image tag separator - separator = ":::" -) - -// ProfileConfig defines a profile config -type ProfileConfig struct { - Tags []string - Profile []byte -} - -// RCProfileProvider defines a RC profile provider -type RCProfileProvider struct { - sync.RWMutex - - client *client.Client - - onNewProfileCallback func(selector cgroupModel.WorkloadSelector, profile *proto.SecurityProfile) -} - -// Stop stops the client -func (r *RCProfileProvider) Stop() error { - r.client.Close() - return nil -} - -func (r *RCProfileProvider) rcProfilesUpdateCallback(configs map[string]state.RawConfig, _ func(string, state.ApplyStatus)) { - for _, config := range configs { - var profCfg ProfileConfig - if err := json.Unmarshal(config.Config, &profCfg); err != nil { - log.Errorf("couldn't decode json profile: %s", err) - continue - } - - profile := &proto.SecurityProfile{} - if err := profile.UnmarshalVT([]byte(profCfg.Profile)); err != nil { - log.Errorf("couldn't decode protobuf profile: %s", err) - continue - } - - selector, err := cgroupModel.NewWorkloadSelector(profile.Selector.ImageName, profile.Selector.ImageTag) - if err != nil { - log.Errorf("selector error %s/%s: %v", profile.Selector.ImageName, profile.Selector.ImageTag, err) - continue - } - - log.Tracef("got a new profile for %v : %v", selector, profile) - r.onNewProfileCallback(selector, profile) - } -} - -// Start starts the Remote Config profile provider and subscribes to updates -func (r *RCProfileProvider) Start(ctx context.Context) error { - log.Info("remote-config profile provider started") - - r.client.Start() - r.client.Subscribe(state.ProductCWSProfiles, r.rcProfilesUpdateCallback) - - go func() { - <-ctx.Done() - _ = r.Stop() - }() - - return nil -} - -func selectorToTag(selector *cgroupModel.WorkloadSelector) string { - return selector.Image + separator + selector.Tag -} - -// UpdateWorkloadSelectors updates the selectors used to query profiles -func (r *RCProfileProvider) UpdateWorkloadSelectors(selectors []cgroupModel.WorkloadSelector) { - r.Lock() - defer r.Unlock() - - log.Tracef("updating workload selector: %v", selectors) - - var tags []string - - for _, selector := range selectors { - tags = append(tags, selectorToTag(&selector)) - } - - r.client.SetCWSWorkloads(tags) -} - -// SetOnNewProfileCallback sets the onNewProfileCallback function -func (r *RCProfileProvider) SetOnNewProfileCallback(onNewProfileCallback func(selector 
cgroupModel.WorkloadSelector, profile *proto.SecurityProfile)) { - r.onNewProfileCallback = onNewProfileCallback -} - -// SendStats sends the metrics of the directory provider -func (r *RCProfileProvider) SendStats(_ statsd.ClientInterface) error { - return nil -} - -// NewRCProfileProvider returns a new Remote Config based policy provider -func NewRCProfileProvider() (*RCProfileProvider, error) { - agentVersion, err := utils.GetAgentSemverVersion() - if err != nil { - return nil, fmt.Errorf("failed to parse agent version: %v", err) - } - - ipcAddress, err := config.GetIPCAddress() - if err != nil { - return nil, fmt.Errorf("failed to get ipc address: %w", err) - } - - c, err := client.NewGRPCClient(ipcAddress, config.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(config.Datadog()) }, - client.WithAgent(agentName, agentVersion.String()), - client.WithProducts(state.ProductCWSProfiles), - client.WithPollInterval(securityAgentRCPollInterval)) - if err != nil { - return nil, err - } - - r := &RCProfileProvider{ - client: c, - } - - return r, nil -} diff --git a/pkg/security/reporter/reporter.go b/pkg/security/reporter/reporter.go index b4d40000fcc06..1c8a4f71bf5a0 100644 --- a/pkg/security/reporter/reporter.go +++ b/pkg/security/reporter/reporter.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/agentimpl" logsconfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/client" "github.com/DataDog/datadog-agent/pkg/logs/diagnostic" @@ -52,7 +52,7 @@ func newReporter(hostname string, stopper startstop.Stopper, sourceName, sourceT stopper.Add(auditor) // setup the pipeline provider that provides pairs of processor and sender - pipelineProvider := pipeline.NewProvider(logsconfig.NumberOfPipelines, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, context, agentimpl.NewStatusProvider(), hostnameimpl.NewHostnameService(), pkgconfig.Datadog()) + pipelineProvider := pipeline.NewProvider(logsconfig.NumberOfPipelines, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, context, agentimpl.NewStatusProvider(), hostnameimpl.NewHostnameService(), pkgconfigsetup.Datadog()) pipelineProvider.Start() stopper.Add(pipelineProvider) diff --git a/pkg/security/resolvers/cgroup/model/model.go b/pkg/security/resolvers/cgroup/model/model.go index 5e2b8bcc6ba9d..15a9046510363 100644 --- a/pkg/security/resolvers/cgroup/model/model.go +++ b/pkg/security/resolvers/cgroup/model/model.go @@ -76,7 +76,7 @@ type CacheEntry struct { sync.RWMutex Deleted *atomic.Bool WorkloadSelector WorkloadSelector - PIDs map[uint32]int8 + PIDs map[uint32]bool } // NewCacheEntry returns a new instance of a CacheEntry @@ -90,11 +90,11 @@ func NewCacheEntry(containerID string, cgroupFlags uint64, pids ...uint32) (*Cac ContainerContext: model.ContainerContext{ ContainerID: containerutils.ContainerID(containerID), }, - PIDs: make(map[uint32]int8, 10), + PIDs: make(map[uint32]bool, 10), } for _, pid := range pids { - newCGroup.PIDs[pid] = 1 + newCGroup.PIDs[pid] = true } return &newCGroup, nil } @@ -127,7 +127,7 @@ func (cgce *CacheEntry) AddPID(pid uint32) { cgce.Lock() defer cgce.Unlock() - cgce.PIDs[pid] = 1 + cgce.PIDs[pid] = true } // SetTags sets the tags for the provided workload 
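[Illustrative aside, not part of the patch: the ruleDisarmer.allow logic reworked above in pkg/security/probe/process_killer.go keeps the capacity-based disarming rule, where seeing more distinct containers/executables than the configured capacity disarms the kill action. A minimal self-contained Go sketch of that rule follows; it replaces the ttlcache-backed, locked caches with a plain map and simplified stand-in names, so it is a sketch of the idea rather than the real implementation.]

package main

import "fmt"

type disarmer struct {
	capacity uint64          // maximum number of distinct keys tolerated
	seen     map[string]bool // stand-in for the TTL cache keyed by container ID or executable path
	disarmed bool
}

// allow reports whether the kill action may still run for the given key.
// Seeing more distinct keys than the configured capacity disarms the action.
func (d *disarmer) allow(key string) bool {
	if _, ok := d.seen[key]; !ok {
		alreadyAtCapacity := uint64(len(d.seen)) >= d.capacity
		d.seen[key] = true
		if alreadyAtCapacity && !d.disarmed {
			d.disarmed = true
			fmt.Printf("disarmed after seeing %d distinct keys\n", len(d.seen))
		}
	}
	return !d.disarmed
}

func main() {
	d := &disarmer{capacity: 2, seen: make(map[string]bool)}
	fmt.Println(d.allow("container-a")) // true
	fmt.Println(d.allow("container-b")) // true
	fmt.Println(d.allow("container-c")) // false: a third distinct key disarms the action
}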
diff --git a/pkg/security/resolvers/cgroup/resolver.go b/pkg/security/resolvers/cgroup/resolver.go index 6f09ea32771fc..1e60d8de0c1f8 100644 --- a/pkg/security/resolvers/cgroup/resolver.go +++ b/pkg/security/resolvers/cgroup/resolver.go @@ -30,6 +30,8 @@ const ( WorkloadSelectorResolved Event = iota // CGroupDeleted is used to notify that a cgroup was deleted CGroupDeleted + // CGroupCreated new croup created + CGroupCreated // CGroupMaxEvent is used cap the event ID CGroupMaxEvent ) @@ -137,6 +139,13 @@ func (cr *Resolver) AddPID(process *model.ProcessCacheEntry) { // add the new CGroup to the cache cr.workloads.Add(string(process.ContainerID), newCGroup) + // notify listeners + cr.listenersLock.Lock() + for _, l := range cr.listeners[CGroupCreated] { + l(newCGroup) + } + cr.listenersLock.Unlock() + // check the tags of this workload cr.checkTags(newCGroup) } diff --git a/pkg/security/resolvers/hash/resolver_linux.go b/pkg/security/resolvers/hash/resolver_linux.go index 48b0d30fa3acb..7de1ef8e8a13c 100644 --- a/pkg/security/resolvers/hash/resolver_linux.go +++ b/pkg/security/resolvers/hash/resolver_linux.go @@ -18,7 +18,6 @@ import ( "io/fs" "os" "slices" - "syscall" "github.com/DataDog/datadog-go/v5/statsd" "github.com/glaslos/ssdeep" @@ -29,6 +28,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/metrics" "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup" + "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/utils" ) @@ -161,9 +161,17 @@ func NewResolver(c *config.RuntimeSecurityConfig, statsdClient statsd.ClientInte // ComputeHashesFromEvent calls ComputeHashes using the provided event func (resolver *Resolver) ComputeHashesFromEvent(event *model.Event, file *model.FileEvent) []string { + if !resolver.opts.Enabled { + return nil + } + // resolve FileEvent event.FieldHandlers.ResolveFilePath(event, file) - return resolver.ComputeHashes(event.GetEventType(), &event.ProcessContext.Process, file) + + process := event.ProcessContext.Process + resolver.HashFileEvent(event.GetEventType(), process.ContainerID, process.Pid, file) + + return file.Hashes } // ComputeHashes computes the hashes of the provided file event. 
@@ -173,23 +181,8 @@ func (resolver *Resolver) ComputeHashes(eventType model.EventType, process *mode return nil } - // check state - if file.HashState == model.Done { - return file.Hashes - } - if file.HashState != model.NoHash && file.HashState != model.HashWasRateLimited { - // this file was already processed and an error occurred, nothing else to do - return nil - } - - // check if the resolver is allowed to hash this event type - if !slices.Contains(resolver.opts.EventTypes, eventType) { - file.HashState = model.EventTypeNotConfigured - resolver.hashMiss[eventType][model.EventTypeNotConfigured].Inc() - return nil - } + resolver.HashFileEvent(eventType, process.ContainerID, process.Pid, file) - resolver.hash(eventType, process, file) return file.Hashes } @@ -215,22 +208,41 @@ type fileUniqKey struct { } func getFileInfo(path string) (fs.FileMode, int64, fileUniqKey, error) { - fileInfo, err := os.Stat(path) + stat, err := utils.UnixStat(path) if err != nil { return 0, 0, fileUniqKey{}, err } - stat := fileInfo.Sys().(*syscall.Stat_t) fkey := fileUniqKey{ dev: stat.Dev, inode: stat.Ino, } - return fileInfo.Mode(), fileInfo.Size(), fkey, nil + return utils.UnixStatModeToGoFileMode(stat.Mode), stat.Size, fkey, nil } -// hash hashes the provided file event -func (resolver *Resolver) hash(eventType model.EventType, process *model.Process, file *model.FileEvent) { +// HashFileEvent hashes the provided file event +func (resolver *Resolver) HashFileEvent(eventType model.EventType, ctrID containerutils.ContainerID, pid uint32, file *model.FileEvent) { + if !resolver.opts.Enabled { + return + } + + // check state + if file.HashState == model.Done { + return + } + if file.HashState != model.NoHash && file.HashState != model.HashWasRateLimited { + // this file was already processed and an error occurred, nothing else to do + return + } + + // check if the resolver is allowed to hash this event type + if !slices.Contains(resolver.opts.EventTypes, eventType) { + file.HashState = model.EventTypeNotConfigured + resolver.hashMiss[eventType][model.EventTypeNotConfigured].Inc() + return + } + if !file.IsPathnameStrResolved || len(file.PathnameStr) == 0 { resolver.hashMiss[eventType][model.PathnameResolutionError].Inc() file.HashState = model.PathnameResolutionError @@ -246,7 +258,7 @@ func (resolver *Resolver) hash(eventType model.EventType, process *model.Process // check if the hash(es) of this file is in cache fileKey := LRUCacheKey{ path: file.PathnameStr, - containerID: string(process.ContainerID), + containerID: string(ctrID), inode: file.Inode, pathID: file.PathKey.PathID, } @@ -269,9 +281,10 @@ func (resolver *Resolver) hash(eventType model.EventType, process *model.Process return } - rootPIDs := []uint32{process.Pid} + // add pid one for hash resolution outside of a container + rootPIDs := []uint32{1, pid} if resolver.cgroupResolver != nil { - w, ok := resolver.cgroupResolver.GetWorkload(string(process.ContainerID)) + w, ok := resolver.cgroupResolver.GetWorkload(string(ctrID)) if ok { rootPIDs = w.GetPIDs() } diff --git a/pkg/security/resolvers/process/resolver_windows.go b/pkg/security/resolvers/process/resolver_windows.go index c629254201dd6..2694f5fda471e 100644 --- a/pkg/security/resolvers/process/resolver_windows.go +++ b/pkg/security/resolvers/process/resolver_windows.go @@ -131,13 +131,16 @@ func (p *Resolver) DeleteEntry(pid uint32, exitTime time.Time) { } // AddNewEntry add a new process entry to the cache -func (p *Resolver) AddNewEntry(pid uint32, ppid uint32, file string, commandLine 
string, OwnerSidString string) (*model.ProcessCacheEntry, error) { +func (p *Resolver) AddNewEntry(pid uint32, ppid uint32, file string, envs []string, commandLine string, OwnerSidString string) (*model.ProcessCacheEntry, error) { e := p.processCacheEntryPool.Get() e.PIDContext.Pid = pid e.PPid = ppid e.Process.CmdLine = utils.NormalizePath(commandLine) e.Process.FileEvent.PathnameStr = utils.NormalizePath(file) e.Process.FileEvent.BasenameStr = filepath.Base(e.Process.FileEvent.PathnameStr) + e.Process.EnvsEntry = &model.EnvsEntry{ + Values: envs, + } e.ExecTime = time.Now() e.Process.OwnerSidString = OwnerSidString p.insertEntry(e) diff --git a/pkg/security/resolvers/resolvers_ebpf.go b/pkg/security/resolvers/resolvers_ebpf.go index 0b4d84fa2cbeb..b3ca8bb549b2b 100644 --- a/pkg/security/resolvers/resolvers_ebpf.go +++ b/pkg/security/resolvers/resolvers_ebpf.go @@ -18,7 +18,6 @@ import ( manager "github.com/DataDog/ebpf-manager" "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/process/procutil" "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/probe/erpc" @@ -64,7 +63,7 @@ type EBPFResolvers struct { } // NewEBPFResolvers creates a new instance of EBPFResolvers -func NewEBPFResolvers(config *config.Config, manager *manager.Manager, statsdClient statsd.ClientInterface, scrubber *procutil.DataScrubber, eRPC *erpc.ERPC, opts Opts, wmeta workloadmeta.Component, telemetry telemetry.Component) (*EBPFResolvers, error) { +func NewEBPFResolvers(config *config.Config, manager *manager.Manager, statsdClient statsd.ClientInterface, scrubber *procutil.DataScrubber, eRPC *erpc.ERPC, opts Opts, telemetry telemetry.Component) (*EBPFResolvers, error) { dentryResolver, err := dentry.NewResolver(config.Probe, statsdClient, eRPC) if err != nil { return nil, err @@ -85,7 +84,7 @@ func NewEBPFResolvers(config *config.Config, manager *manager.Manager, statsdCli var sbomResolver *sbom.Resolver if config.RuntimeSecurity.SBOMResolverEnabled { - sbomResolver, err = sbom.NewSBOMResolver(config.RuntimeSecurity, statsdClient, wmeta) + sbomResolver, err = sbom.NewSBOMResolver(config.RuntimeSecurity, statsdClient) if err != nil { return nil, err } diff --git a/pkg/security/resolvers/sbom/resolver.go b/pkg/security/resolvers/sbom/resolver.go index 2a64784d6b793..8b03e0831f76a 100644 --- a/pkg/security/resolvers/sbom/resolver.go +++ b/pkg/security/resolvers/sbom/resolver.go @@ -26,7 +26,7 @@ import ( "go.uber.org/atomic" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/sbom/collectors" "github.com/DataDog/datadog-agent/pkg/sbom/collectors/host" @@ -152,8 +152,8 @@ type Resolver struct { } // NewSBOMResolver returns a new instance of Resolver -func NewSBOMResolver(c *config.RuntimeSecurityConfig, statsdClient statsd.ClientInterface, wmeta workloadmeta.Component) (*Resolver, error) { - sbomScanner, err := sbomscanner.CreateGlobalScanner(coreconfig.SystemProbe(), optional.NewOption(wmeta)) +func NewSBOMResolver(c *config.RuntimeSecurityConfig, statsdClient statsd.ClientInterface) (*Resolver, error) { + sbomScanner, err := sbomscanner.CreateGlobalScanner(pkgconfigsetup.SystemProbe(), 
optional.NewNoneOption[workloadmeta.Component]()) if err != nil { return nil, err } @@ -208,7 +208,7 @@ func (r *Resolver) prepareContextTags() { r.contextTags = append(r.contextTags, fmt.Sprintf("host:%s", r.hostname)) // merge tags from config - for _, tag := range configUtils.GetConfiguredTags(coreconfig.Datadog(), true) { + for _, tag := range configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), true) { if strings.HasPrefix(tag, "host") { continue } diff --git a/pkg/security/resolvers/sbom/resolver_unsupported.go b/pkg/security/resolvers/sbom/resolver_unsupported.go index 43583a80e7f78..a9b05d167589a 100644 --- a/pkg/security/resolvers/sbom/resolver_unsupported.go +++ b/pkg/security/resolvers/sbom/resolver_unsupported.go @@ -13,7 +13,6 @@ import ( "github.com/DataDog/datadog-go/v5/statsd" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/security/config" cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" "github.com/DataDog/datadog-agent/pkg/security/secl/model" @@ -24,7 +23,7 @@ type Resolver struct { } // NewSBOMResolver returns a new instance of Resolver -func NewSBOMResolver(_ *config.RuntimeSecurityConfig, _ statsd.ClientInterface, _ workloadmeta.Component) (*Resolver, error) { +func NewSBOMResolver(_ *config.RuntimeSecurityConfig, _ statsd.ClientInterface) (*Resolver, error) { return &Resolver{}, nil } diff --git a/pkg/security/resolvers/tags/resolver.go b/pkg/security/resolvers/tags/resolver.go index 57dd03cbb3caf..caa06d60626cf 100644 --- a/pkg/security/resolvers/tags/resolver.go +++ b/pkg/security/resolvers/tags/resolver.go @@ -8,12 +8,13 @@ package tags import ( "context" + "strings" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/remote" taggerTelemetry "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/core/telemetry" - rootconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/probe/config" "github.com/DataDog/datadog-agent/pkg/security/utils" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -72,6 +73,12 @@ func (t *DefaultResolver) Start(ctx context.Context) error { // Resolve returns the tags for the given id func (t *DefaultResolver) Resolve(id string) []string { + // container id for ecs task are composed of task id + container id. + // use only the container id part for the tag resolution. 
+ if els := strings.Split(id, "-"); len(els) == 2 { + id = els[1] + } + entityID := types.NewEntityID(types.ContainerID, id) tags, _ := t.tagger.Tag(entityID.String(), types.OrchestratorCardinality) return tags @@ -96,12 +103,12 @@ func (t *DefaultResolver) Stop() error { // NewResolver returns a new tags resolver func NewResolver(config *config.Config, telemetry telemetry.Component) Resolver { if config.RemoteTaggerEnabled { - options, err := remote.NodeAgentOptionsForSecurityResolvers(rootconfig.Datadog()) + options, err := remote.NodeAgentOptionsForSecurityResolvers(pkgconfigsetup.Datadog()) if err != nil { log.Errorf("unable to configure the remote tagger: %s", err) } else { return &DefaultResolver{ - tagger: remote.NewTagger(options, rootconfig.Datadog(), taggerTelemetry.NewStore(telemetry)), + tagger: remote.NewTagger(options, pkgconfigsetup.Datadog(), taggerTelemetry.NewStore(telemetry)), } } } diff --git a/pkg/security/rules/autosuppression/autosuppression.go b/pkg/security/rules/autosuppression/autosuppression.go index 168eb23087be6..56f9d0b28ce34 100644 --- a/pkg/security/rules/autosuppression/autosuppression.go +++ b/pkg/security/rules/autosuppression/autosuppression.go @@ -38,7 +38,9 @@ const ( // Opts holds options for auto suppression type Opts struct { + SecurityProfileEnabled bool SecurityProfileAutoSuppressionEnabled bool + ActivityDumpEnabled bool ActivityDumpAutoSuppressionEnabled bool EventTypes []model.EventType } @@ -68,7 +70,7 @@ func (as *AutoSuppression) Init(opts Opts) { // Suppresses returns true if the event should be suppressed for the given rule, false otherwise. It also counts statistics depending on this result func (as *AutoSuppression) Suppresses(rule *rules.Rule, event *model.Event) bool { if isAllowAutosuppressionRule(rule) && event.ContainerContext.ContainerID != "" && slices.Contains(as.opts.EventTypes, event.GetEventType()) { - if as.opts.ActivityDumpAutoSuppressionEnabled { + if as.opts.ActivityDumpEnabled && as.opts.ActivityDumpAutoSuppressionEnabled { if event.HasActiveActivityDump() { as.count(rule.ID, activityDumpSuppressionType) return true @@ -77,7 +79,7 @@ func (as *AutoSuppression) Suppresses(rule *rules.Rule, event *model.Event) bool return true } } - if as.opts.SecurityProfileAutoSuppressionEnabled { + if as.opts.SecurityProfileEnabled && as.opts.SecurityProfileAutoSuppressionEnabled { if event.IsInProfile() { as.count(rule.ID, securityProfileSuppressionType) return true diff --git a/pkg/security/rules/engine.go b/pkg/security/rules/engine.go index 67e48fa8845b2..6992f83dc23a6 100644 --- a/pkg/security/rules/engine.go +++ b/pkg/security/rules/engine.go @@ -63,6 +63,7 @@ type RuleEngine struct { eventSender events.EventSender rulesetListeners []rules.RuleSetListener AutoSuppression autosuppression.AutoSuppression + pid uint32 } // APIServer defines the API server @@ -85,10 +86,13 @@ func NewRuleEngine(evm *eventmonitor.EventMonitor, config *config.RuntimeSecurit policyLoader: rules.NewPolicyLoader(), statsdClient: statsdClient, rulesetListeners: rulesetListeners, + pid: utils.Getpid(), } engine.AutoSuppression.Init(autosuppression.Opts{ + SecurityProfileEnabled: config.SecurityProfileEnabled, SecurityProfileAutoSuppressionEnabled: config.SecurityProfileAutoSuppressionEnabled, + ActivityDumpEnabled: config.ActivityDumpEnabled, ActivityDumpAutoSuppressionEnabled: config.ActivityDumpAutoSuppressionEnabled, EventTypes: config.SecurityProfileAutoSuppressionEventTypes, }) @@ -164,6 +168,7 @@ func (e *RuleEngine) Start(ctx context.Context, 
reloadChan <-chan struct{}, wg * if err := e.ReloadPolicies(); err != nil { seclog.Errorf("failed to reload policies: %s", err) } + e.probe.PlaySnapshot() } }() @@ -360,7 +365,7 @@ func (e *RuleEngine) gatherDefaultPolicyProviders() []rules.PolicyProvider { // add remote config as config provider if enabled. if e.config.RemoteConfigurationEnabled { - rcPolicyProvider, err := rconfig.NewRCPolicyProvider(e.config.RemoteConfigurationDumpPolicies) + rcPolicyProvider, err := rconfig.NewRCPolicyProvider(e.config.RemoteConfigurationDumpPolicies, e.rcStateCallback) if err != nil { seclog.Errorf("will be unable to load remote policies: %s", err) } else { @@ -378,6 +383,15 @@ func (e *RuleEngine) gatherDefaultPolicyProviders() []rules.PolicyProvider { return policyProviders } +func (e *RuleEngine) rcStateCallback(state bool) { + if state { + seclog.Infof("Connection to remote config established") + } else { + seclog.Infof("Connection to remote config lost") + } + e.probe.EnableEnforcement(state) +} + // EventDiscarderFound is called by the ruleset when a new discarder discovered func (e *RuleEngine) EventDiscarderFound(rs *rules.RuleSet, event eval.Event, field eval.Field, eventType eval.EventType) { if e.reloading.Load() { @@ -505,6 +519,11 @@ func (e *RuleEngine) SetRulesetLoadedCallback(cb func(es *rules.RuleSet, err *mu // HandleEvent is called by the probe when an event arrives from the kernel func (e *RuleEngine) HandleEvent(event *model.Event) { + // don't eval event originating from myself + if !e.probe.Opts.DontDiscardRuntime && event.ProcessContext != nil && event.ProcessContext.Pid == e.pid { + return + } + // event already marked with an error, skip it if event.Error != nil { return diff --git a/pkg/security/secl/compiler/eval/eval_operators.go b/pkg/security/secl/compiler/eval/eval_operators.go index 9b8f3ee330b0f..515a1393c3c22 100644 --- a/pkg/security/secl/compiler/eval/eval_operators.go +++ b/pkg/security/secl/compiler/eval/eval_operators.go @@ -478,13 +478,13 @@ func GreaterThan(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolEvaluator isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -549,13 +549,13 @@ func GreaterOrEqualThan(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolEv isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -620,13 +620,13 @@ func LesserThan(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolEvaluator, isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: 
ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -691,13 +691,13 @@ func LesserOrEqualThan(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolEva isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -762,13 +762,13 @@ func DurationLesserThan(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolEv isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -833,13 +833,13 @@ func DurationLesserOrEqualThan(a *IntEvaluator, b *IntEvaluator, state *State) ( isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -904,13 +904,13 @@ func DurationGreaterThan(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolE isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -975,13 +975,13 @@ func DurationGreaterOrEqualThan(a *IntEvaluator, b *IntEvaluator, state *State) isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: 
a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -1117,13 +1117,13 @@ func DurationLesserThanArithmeticOperation(a *IntEvaluator, b *IntEvaluator, sta isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -1188,13 +1188,13 @@ func DurationLesserOrEqualThanArithmeticOperation(a *IntEvaluator, b *IntEvaluat isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -1259,13 +1259,13 @@ func DurationGreaterThanArithmeticOperation(a *IntEvaluator, b *IntEvaluator, st isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -1330,13 +1330,13 @@ func DurationGreaterOrEqualThanArithmeticOperation(a *IntEvaluator, b *IntEvalua isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } diff --git a/pkg/security/secl/compiler/eval/field.go b/pkg/security/secl/compiler/eval/field.go index 8646c1aa3c0d7..2b9f6f1cc5297 100644 --- a/pkg/security/secl/compiler/eval/field.go +++ b/pkg/security/secl/compiler/eval/field.go @@ -23,6 +23,7 @@ const ( BitmaskValueType FieldValueType = 1 << 4 VariableValueType FieldValueType = 1 << 5 IPNetValueType FieldValueType = 1 << 6 + RangeValueType FieldValueType = 1 << 7 ) // MarshalJSON returns the JSON encoding of the FieldValueType @@ -51,6 +52,8 @@ func (t FieldValueType) String() string { return "variable" case IPNetValueType: return "ip" + case RangeValueType: + return "range" } return "" diff --git a/pkg/security/secl/compiler/eval/state.go 
b/pkg/security/secl/compiler/eval/state.go index 2a8793fd1e19a..d7273496b2ff3 100644 --- a/pkg/security/secl/compiler/eval/state.go +++ b/pkg/security/secl/compiler/eval/state.go @@ -55,7 +55,7 @@ func (s *State) UpdateFieldValues(field Field, value FieldValue) error { for _, v := range values { // compare only comparable switch v.Value.(type) { - case int, uint, int64, uint64, string: + case int, uint, int64, uint64, string, bool: if v == value { return nil } diff --git a/pkg/security/secl/compiler/generators/operators/operators.go b/pkg/security/secl/compiler/generators/operators/operators.go index f826fe42cb76d..36748ff062670 100644 --- a/pkg/security/secl/compiler/generators/operators/operators.go +++ b/pkg/security/secl/compiler/generators/operators/operators.go @@ -28,6 +28,7 @@ type Operator struct { ArrayType string ValueType string Commutative bool + RangeLimit string } func main() { @@ -360,7 +361,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: stdCompare(">"), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "GreaterOrEqualThan", @@ -369,7 +370,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: stdCompare(">="), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "LesserThan", @@ -378,7 +379,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: stdCompare("<"), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "LesserOrEqualThan", @@ -387,7 +388,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: stdCompare("<="), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationLesserThan", @@ -396,7 +397,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: durationCompare("<"), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationLesserOrEqualThan", @@ -405,7 +406,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: durationCompare("<="), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationGreaterThan", @@ -414,7 +415,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: durationCompare(">"), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationGreaterOrEqualThan", @@ -423,7 +424,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: durationCompare(">="), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationEqual", @@ -441,7 +442,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: durationCompareArithmeticOperation("<"), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationLesserOrEqualThanArithmeticOperation", @@ -450,7 +451,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ 
.Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: durationCompareArithmeticOperation("<="), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationGreaterThanArithmeticOperation", @@ -459,7 +460,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: durationCompareArithmeticOperation(">"), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationGreaterOrEqualThanArithmeticOperation", @@ -468,7 +469,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: durationCompareArithmeticOperation(">="), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationEqualArithmeticOperation", @@ -509,7 +510,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ EvalReturnType: "bool", Op: stdCompare(">"), ArrayType: "int", - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "IntArrayGreaterOrEqualThan", @@ -519,7 +520,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ EvalReturnType: "bool", Op: stdCompare(">="), ArrayType: "int", - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "IntArrayLesserThan", @@ -529,7 +530,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ EvalReturnType: "bool", Op: stdCompare("<"), ArrayType: "int", - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "IntArrayLesserOrEqualThan", @@ -539,7 +540,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ EvalReturnType: "bool", Op: stdCompare("<="), ArrayType: "int", - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationArrayLesserThan", @@ -549,7 +550,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ EvalReturnType: "bool", Op: durationCompare("<"), ArrayType: "int", - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationArrayLesserOrEqualThan", @@ -559,7 +560,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ EvalReturnType: "bool", Op: durationCompare("<="), ArrayType: "int", - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationArrayGreaterThan", @@ -569,7 +570,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ EvalReturnType: "bool", Op: durationCompare(">"), ArrayType: "int", - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationArrayGreaterOrEqualThan", @@ -579,7 +580,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ EvalReturnType: "bool", Op: durationCompare(">="), ArrayType: "int", - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, }, } diff --git a/pkg/security/secl/containerutils/cgroup.go b/pkg/security/secl/containerutils/cgroup.go index 734fc74438526..fe9c444a06f04 100644 --- a/pkg/security/secl/containerutils/cgroup.go +++ b/pkg/security/secl/containerutils/cgroup.go @@ -46,6 +46,7 @@ var RuntimePrefixes = map[string]CGroupManager{ // GetCGroupManager extracts the cgroup manager from a cgroup name func GetCGroupManager(cgroup string) (string, CGroupFlags) { + cgroup = strings.TrimLeft(cgroup, "/") for runtimePrefix, 
runtimeFlag := range RuntimePrefixes { if strings.HasPrefix(cgroup, runtimePrefix) { return cgroup[:len(runtimePrefix)], CGroupFlags(runtimeFlag) @@ -56,6 +57,7 @@ func GetCGroupManager(cgroup string) (string, CGroupFlags) { // GetContainerFromCgroup extracts the container ID from a cgroup name func GetContainerFromCgroup(cgroup string) (string, CGroupFlags) { + cgroup = strings.TrimLeft(cgroup, "/") for runtimePrefix, runtimeFlag := range RuntimePrefixes { if strings.HasPrefix(cgroup, runtimePrefix) { return cgroup[len(runtimePrefix):], CGroupFlags(runtimeFlag) @@ -67,7 +69,7 @@ func GetContainerFromCgroup(cgroup string) (string, CGroupFlags) { // GetCgroupFromContainer infers the container runtime from a cgroup name func GetCgroupFromContainer(id ContainerID, flags CGroupFlags) CGroupID { for runtimePrefix, runtimeFlag := range RuntimePrefixes { - if uint64(flags)&0b111 == uint64(runtimeFlag) { + if flags&CGroupManagerMask == CGroupFlags(runtimeFlag) { return CGroupID(runtimePrefix + string(id)) } } diff --git a/pkg/security/secl/containerutils/helpers.go b/pkg/security/secl/containerutils/helpers.go index ce2d8c7d3d24b..701e4f9cd1d9f 100644 --- a/pkg/security/secl/containerutils/helpers.go +++ b/pkg/security/secl/containerutils/helpers.go @@ -80,7 +80,7 @@ func FindContainerID(s string) (string, uint64) { // GetCGroupContext returns the cgroup ID and the sanitized container ID from a container id/flags tuple func GetCGroupContext(containerID ContainerID, cgroupFlags CGroupFlags) (CGroupID, ContainerID) { cgroupID := GetCgroupFromContainer(containerID, cgroupFlags) - if cgroupFlags&0b111 == 0 { + if !cgroupFlags.IsContainer() { containerID = "" } return CGroupID(cgroupID), ContainerID(containerID) diff --git a/pkg/security/secl/containerutils/types.go b/pkg/security/secl/containerutils/types.go index fbda75c7caebb..bc55b8539aec0 100644 --- a/pkg/security/secl/containerutils/types.go +++ b/pkg/security/secl/containerutils/types.go @@ -14,3 +14,11 @@ type CGroupID string // CGroupFlags represents the flags of a cgroup type CGroupFlags uint64 + +// CGroupManagerMask holds the bitmask for the cgroup manager +const CGroupManagerMask CGroupFlags = 0b111 + +// IsContainer returns whether a cgroup maps to a container +func (f CGroupFlags) IsContainer() bool { + return (f&CGroupManagerMask != 0) && ((f & CGroupManagerMask) != CGroupFlags(CGroupManagerSystemd)) +} diff --git a/pkg/security/secl/go.mod b/pkg/security/secl/go.mod index 22d7931b1557d..a46de50a3a0b7 100644 --- a/pkg/security/secl/go.mod +++ b/pkg/security/secl/go.mod @@ -16,12 +16,14 @@ require ( github.com/skydive-project/go-debouncer v1.0.0 github.com/spf13/cast v1.7.0 github.com/stretchr/testify v1.9.0 - golang.org/x/sys v0.24.0 - golang.org/x/text v0.17.0 - golang.org/x/tools v0.24.0 + github.com/xeipuuv/gojsonschema v1.2.0 + golang.org/x/sys v0.25.0 + golang.org/x/text v0.18.0 + golang.org/x/tools v0.25.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 modernc.org/mathutil v1.6.0 + sigs.k8s.io/yaml v1.4.0 ) require ( @@ -35,8 +37,10 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/shopspring/decimal v1.4.0 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect golang.org/x/crypto v0.26.0 // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // 
indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect ) diff --git a/pkg/security/secl/go.sum b/pkg/security/secl/go.sum index d111f0dc627d3..d5c938d6789d2 100644 --- a/pkg/security/secl/go.sum +++ b/pkg/security/secl/go.sum @@ -18,6 +18,7 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -56,23 +57,30 @@ github.com/skydive-project/go-debouncer v1.0.0/go.mod h1:7pK+5HBlYCD8W2cXhvMRsMs github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod 
h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -83,3 +91,5 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/pkg/security/secl/model/field_handlers_unix.go b/pkg/security/secl/model/field_handlers_unix.go index 80efe8248372b..32ff972cfee5c 100644 --- a/pkg/security/secl/model/field_handlers_unix.go +++ b/pkg/security/secl/model/field_handlers_unix.go @@ -36,7 +36,9 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveService(ev, &ev.BaseEvent) _ = ev.FieldHandlers.ResolveEventTimestamp(ev, &ev.BaseEvent) _ = ev.FieldHandlers.ResolveNetworkDeviceIfName(ev, &ev.NetworkContext.Device) - _ = ev.FieldHandlers.ResolveProcessArgs(ev, &ev.BaseEvent.ProcessContext.Process) + if !forADs { + _ = ev.FieldHandlers.ResolveProcessArgs(ev, &ev.BaseEvent.ProcessContext.Process) + } _ = ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &ev.BaseEvent.ProcessContext.Process) _ = ev.FieldHandlers.ResolveProcessArgv(ev, &ev.BaseEvent.ProcessContext.Process) _ = ev.FieldHandlers.ResolveProcessArgv0(ev, &ev.BaseEvent.ProcessContext.Process) @@ -112,7 +114,9 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) } if ev.BaseEvent.ProcessContext.HasParent() { - _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.BaseEvent.ProcessContext.Parent) + if !forADs { + _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.BaseEvent.ProcessContext.Parent) + } } if ev.BaseEvent.ProcessContext.HasParent() { _ = ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.BaseEvent.ProcessContext.Parent) @@ -358,7 +362,9 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveK8SUID(ev, &ev.Exec.Process.UserSession) _ = ev.FieldHandlers.ResolveK8SGroups(ev, &ev.Exec.Process.UserSession) _ = ev.FieldHandlers.ResolveProcessArgv0(ev, ev.Exec.Process) - _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.Exec.Process) + if !forADs { + _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.Exec.Process) + } _ = ev.FieldHandlers.ResolveProcessArgv(ev, ev.Exec.Process) _ = ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.Exec.Process) _ = ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Exec.Process) @@ -440,7 +446,9 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveK8SUID(ev, 
&ev.Exit.Process.UserSession) _ = ev.FieldHandlers.ResolveK8SGroups(ev, &ev.Exit.Process.UserSession) _ = ev.FieldHandlers.ResolveProcessArgv0(ev, ev.Exit.Process) - _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.Exit.Process) + if !forADs { + _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.Exit.Process) + } _ = ev.FieldHandlers.ResolveProcessArgv(ev, ev.Exit.Process) _ = ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.Exit.Process) _ = ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Exit.Process) @@ -638,7 +646,9 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveK8SUID(ev, &ev.PTrace.Tracee.Process.UserSession) _ = ev.FieldHandlers.ResolveK8SGroups(ev, &ev.PTrace.Tracee.Process.UserSession) _ = ev.FieldHandlers.ResolveProcessArgv0(ev, &ev.PTrace.Tracee.Process) - _ = ev.FieldHandlers.ResolveProcessArgs(ev, &ev.PTrace.Tracee.Process) + if !forADs { + _ = ev.FieldHandlers.ResolveProcessArgs(ev, &ev.PTrace.Tracee.Process) + } _ = ev.FieldHandlers.ResolveProcessArgv(ev, &ev.PTrace.Tracee.Process) _ = ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &ev.PTrace.Tracee.Process) _ = ev.FieldHandlers.ResolveProcessEnvs(ev, &ev.PTrace.Tracee.Process) @@ -733,7 +743,9 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveProcessArgv0(ev, ev.PTrace.Tracee.Parent) } if ev.PTrace.Tracee.HasParent() { - _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.PTrace.Tracee.Parent) + if !forADs { + _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.PTrace.Tracee.Parent) + } } if ev.PTrace.Tracee.HasParent() { _ = ev.FieldHandlers.ResolveProcessArgv(ev, ev.PTrace.Tracee.Parent) @@ -907,7 +919,9 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveK8SUID(ev, &ev.Signal.Target.Process.UserSession) _ = ev.FieldHandlers.ResolveK8SGroups(ev, &ev.Signal.Target.Process.UserSession) _ = ev.FieldHandlers.ResolveProcessArgv0(ev, &ev.Signal.Target.Process) - _ = ev.FieldHandlers.ResolveProcessArgs(ev, &ev.Signal.Target.Process) + if !forADs { + _ = ev.FieldHandlers.ResolveProcessArgs(ev, &ev.Signal.Target.Process) + } _ = ev.FieldHandlers.ResolveProcessArgv(ev, &ev.Signal.Target.Process) _ = ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &ev.Signal.Target.Process) _ = ev.FieldHandlers.ResolveProcessEnvs(ev, &ev.Signal.Target.Process) @@ -1002,7 +1016,9 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveProcessArgv0(ev, ev.Signal.Target.Parent) } if ev.Signal.Target.HasParent() { - _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.Signal.Target.Parent) + if !forADs { + _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.Signal.Target.Parent) + } } if ev.Signal.Target.HasParent() { _ = ev.FieldHandlers.ResolveProcessArgv(ev, ev.Signal.Target.Parent) diff --git a/pkg/security/secl/model/model.go b/pkg/security/secl/model/model.go index 77e44b0b6a86d..d8aebe7770aa5 100644 --- a/pkg/security/secl/model/model.go +++ b/pkg/security/secl/model/model.go @@ -313,8 +313,9 @@ type MatchedRule struct { // ActionReport defines an action report type ActionReport interface { - ToJSON() ([]byte, bool, error) + ToJSON() ([]byte, error) IsMatchingRule(ruleID eval.RuleID) bool + IsResolved() bool } // NewMatchedRule return a new MatchedRule instance diff --git a/pkg/security/secl/model/model_unix.go b/pkg/security/secl/model/model_unix.go index 65cd8f413d90f..491032e18ee6c 100644 --- a/pkg/security/secl/model/model_unix.go +++ b/pkg/security/secl/model/model_unix.go @@ -13,9 +13,10 @@ package model import ( "time" + "modernc.org/mathutil" + 
"github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" - "modernc.org/mathutil" ) // Event represents an event sent from the kernel @@ -238,7 +239,7 @@ type Process struct { // defined to generate accessors, ArgsTruncated and EnvsTruncated are used during by unmarshaller Argv0 string `field:"argv0,handler:ResolveProcessArgv0,weight:100"` // SECLDoc[argv0] Definition:`First argument of the process` - Args string `field:"args,handler:ResolveProcessArgs,weight:500"` // SECLDoc[args] Definition:`Arguments of the process (as a string, excluding argv0)` Example:`exec.args == "-sV -p 22,53,110,143,4564 198.116.0-255.1-127"` Description:`Matches any process with these exact arguments.` Example:`exec.args =~ "* -F * http*"` Description:`Matches any process that has the "-F" argument anywhere before an argument starting with "http".` + Args string `field:"args,handler:ResolveProcessArgs,weight:500,opts:skip_ad"` // SECLDoc[args] Definition:`Arguments of the process (as a string, excluding argv0)` Example:`exec.args == "-sV -p 22,53,110,143,4564 198.116.0-255.1-127"` Description:`Matches any process with these exact arguments.` Example:`exec.args =~ "* -F * http*"` Description:`Matches any process that has the "-F" argument anywhere before an argument starting with "http".` Argv []string `field:"argv,handler:ResolveProcessArgv,weight:500; cmdargv,handler:ResolveProcessCmdArgv,opts:getters_only; args_flags,handler:ResolveProcessArgsFlags,opts:helper; args_options,handler:ResolveProcessArgsOptions,opts:helper"` // SECLDoc[argv] Definition:`Arguments of the process (as an array, excluding argv0)` Example:`exec.argv in ["127.0.0.1"]` Description:`Matches any process that has this IP address as one of its arguments.` SECLDoc[args_flags] Definition:`Flags in the process arguments` Example:`exec.args_flags in ["s"] && exec.args_flags in ["V"]` Description:`Matches any process with both "-s" and "-V" flags in its arguments. 
Also matches "-sV".` SECLDoc[args_options] Definition:`Argument of the process as options` Example:`exec.args_options in ["p=0-1024"]` Description:`Matches any process that has either "-p 0-1024" or "--p=0-1024" in its arguments.` ArgsTruncated bool `field:"args_truncated,handler:ResolveProcessArgsTruncated"` // SECLDoc[args_truncated] Definition:`Indicator of arguments truncation` Envs []string `field:"envs,handler:ResolveProcessEnvs,weight:100"` // SECLDoc[envs] Definition:`Environment variable names of the process` @@ -602,7 +603,9 @@ type CgroupTracingEvent struct { // CgroupWriteEvent is used to signal that a new cgroup was created type CgroupWriteEvent struct { - File FileEvent `field:"file"` // Path to the cgroup + File FileEvent `field:"file"` // Path to the cgroup + Pid uint32 `field:"-"` // PID of the process added to the cgroup + CGroupFlags uint32 `field:"-"` // CGroup flags } // ActivityDumpLoadConfig represents the load configuration of an activity dump diff --git a/pkg/security/secl/model/unmarshallers_linux.go b/pkg/security/secl/model/unmarshallers_linux.go index b29f7ecd55284..93ea0f6284d23 100644 --- a/pkg/security/secl/model/unmarshallers_linux.go +++ b/pkg/security/secl/model/unmarshallers_linux.go @@ -979,7 +979,7 @@ func (e *CgroupTracingEvent) UnmarshalBinary(data []byte) (int, error) { } cursor += read - if len(data)-cursor < 4 { + if len(data)-cursor < 8 { return 0, ErrNotEnoughData } @@ -994,6 +994,16 @@ func (e *CgroupWriteEvent) UnmarshalBinary(data []byte) (int, error) { return 0, err } + if len(data)-read < 8 { + return 0, ErrNotEnoughData + } + + e.Pid = binary.NativeEndian.Uint32(data[read : read+4]) + read += 4 + + e.CGroupFlags = binary.NativeEndian.Uint32(data[read : read+4]) + read += 4 + return read, nil } diff --git a/pkg/security/secl/rules/approvers.go b/pkg/security/secl/rules/approvers.go index 7e470d55684a7..3f3bc89c69c33 100644 --- a/pkg/security/secl/rules/approvers.go +++ b/pkg/security/secl/rules/approvers.go @@ -8,6 +8,7 @@ package rules import ( "errors" + "math" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" ) @@ -15,8 +16,7 @@ import ( // Approvers are just filter values indexed by field type Approvers map[eval.Field]FilterValues -// isAnApprover returns whether the given value is an approver for the given rule -func isAnApprover(event eval.Event, ctx *eval.Context, rule *Rule, field eval.Field, value interface{}) (bool, error) { +func partialEval(event eval.Event, ctx *eval.Context, rule *Rule, field eval.Field, value interface{}) (bool, error) { var readOnlyError *eval.ErrFieldReadOnly if err := event.SetFieldValue(field, value); err != nil { if errors.As(err, &readOnlyError) { @@ -24,28 +24,141 @@ func isAnApprover(event eval.Event, ctx *eval.Context, rule *Rule, field eval.Fi } return false, err } - origResult, err := rule.PartialEval(ctx, field) + return rule.PartialEval(ctx, field) +} + +func isAnIntLesserEqualThanApprover(event eval.Event, ctx *eval.Context, rule *Rule, fieldCap FieldCapability, value interface{}) (bool, interface{}, error) { + min := math.MinInt + if fieldCap.RangeFilterValue != nil { + min = fieldCap.RangeFilterValue.Min + } + + maxResult, err := partialEval(event, ctx, rule, fieldCap.Field, value) if err != nil { - return false, err + return false, RangeFilterValue{}, err + } + if !maxResult { + return false, RangeFilterValue{}, nil } - notValue, err := eval.NotOfValue(value) + result, err := partialEval(event, ctx, rule, fieldCap.Field, value.(int)+1) + return !result, RangeFilterValue{Min: 
min, Max: value.(int)}, err +} + +func isAnIntLesserThanApprover(event eval.Event, ctx *eval.Context, rule *Rule, fieldCap FieldCapability, value interface{}) (bool, interface{}, error) { + min := math.MinInt + if fieldCap.RangeFilterValue != nil { + min = fieldCap.RangeFilterValue.Min + } + + maxResult, err := partialEval(event, ctx, rule, fieldCap.Field, value.(int)-1) if err != nil { - return false, err + return false, RangeFilterValue{}, err + } + if !maxResult { + return false, RangeFilterValue{}, nil } - if err := event.SetFieldValue(field, notValue); err != nil { - if errors.As(err, &readOnlyError) { + result, err := partialEval(event, ctx, rule, fieldCap.Field, value) + return !result, RangeFilterValue{Min: min, Max: value.(int) - 1}, err +} + +func isAnIntGreaterEqualThanApprover(event eval.Event, ctx *eval.Context, rule *Rule, fieldCap FieldCapability, value interface{}) (bool, interface{}, error) { + max := math.MaxInt + if fieldCap.RangeFilterValue != nil { + max = fieldCap.RangeFilterValue.Max + } + + minResult, err := partialEval(event, ctx, rule, fieldCap.Field, value) + if err != nil { + return false, RangeFilterValue{}, err + } + if !minResult { + return false, RangeFilterValue{}, nil + } + + result, err := partialEval(event, ctx, rule, fieldCap.Field, value.(int)-1) + return !result, RangeFilterValue{Min: value.(int), Max: max}, err +} + +func isAnIntGreaterThanApprover(event eval.Event, ctx *eval.Context, rule *Rule, fieldCap FieldCapability, value interface{}) (bool, interface{}, error) { + max := math.MaxInt + if fieldCap.RangeFilterValue != nil { + max = fieldCap.RangeFilterValue.Max + } + + minResult, err := partialEval(event, ctx, rule, fieldCap.Field, value.(int)+1) + if err != nil { + return false, RangeFilterValue{}, err + } + if !minResult { + return false, RangeFilterValue{}, nil + } + + result, err := partialEval(event, ctx, rule, fieldCap.Field, value) + return !result, RangeFilterValue{Min: value.(int) + 1, Max: max}, err +} + +// isAnApprover returns whether the given value is an approver for the given rule +func isAnApprover(event eval.Event, ctx *eval.Context, rule *Rule, fieldCap FieldCapability, fieldValueType eval.FieldValueType, value interface{}) (bool, eval.FieldValueType, interface{}, error) { + if fieldValueType == eval.RangeValueType { + isAnApprover, approverValue, err := isAnIntLesserEqualThanApprover(event, ctx, rule, fieldCap, value) + if isAnApprover || err != nil { + return isAnApprover, eval.RangeValueType, approverValue, err + } + isAnApprover, approverValue, err = isAnIntLesserThanApprover(event, ctx, rule, fieldCap, value) + if isAnApprover || err != nil { + return isAnApprover, eval.RangeValueType, approverValue, err + } + isAnApprover, approverValue, err = isAnIntGreaterEqualThanApprover(event, ctx, rule, fieldCap, value) + if isAnApprover || err != nil { + return isAnApprover, eval.RangeValueType, approverValue, err + } + isAnApprover, approverValue, err = isAnIntGreaterThanApprover(event, ctx, rule, fieldCap, value) + if isAnApprover || err != nil { + return isAnApprover, eval.RangeValueType, approverValue, err + } + } + + isaaFnc := func(v1, v2 interface{}) (bool, error) { + origResult, err := partialEval(event, ctx, rule, fieldCap.Field, v1) + if err != nil { + return false, err + } + if !origResult { return false, nil } - return false, err + + notResult, err := partialEval(event, ctx, rule, fieldCap.Field, v2) + if err != nil { + return false, err + } + return origResult != notResult, nil } - notResult, err := 
rule.PartialEval(ctx, field) + + notValue, err := eval.NotOfValue(value) if err != nil { - return false, err + return false, fieldValueType, value, err + } + + result, err := isaaFnc(value, notValue) + if result || err != nil { + return result, fieldValueType, value, err } - return origResult && !notResult, nil + if fieldCap.HandleNotApproverValue == nil { + return false, fieldValueType, value, err + } + + result, err = isaaFnc(notValue, value) + if result { + fieldValueType, value, ok := fieldCap.HandleNotApproverValue(fieldValueType, value) + if ok { + return true, fieldValueType, value, nil + } + } + + return false, fieldValueType, value, err } func bitmaskCombinations(bitmasks []int) []int { @@ -96,7 +209,6 @@ func getApprovers(rules []*Rule, event eval.Event, fieldCaps FieldCapabilities) ctx := eval.NewContext(event) - // for each rule we should at least find one approver otherwise we will return no approver for the field for _, rule := range rules { var ( bestFilterField eval.Field @@ -105,6 +217,7 @@ func getApprovers(rules []*Rule, event eval.Event, fieldCaps FieldCapabilities) bestFilterMode FilterMode ) + LOOP: for _, fieldCap := range fieldCaps { field := fieldCap.Field @@ -112,15 +225,27 @@ func getApprovers(rules []*Rule, event eval.Event, fieldCaps FieldCapabilities) var bitmasks []int for _, value := range rule.GetFieldValues(field) { + // TODO: handle range for bitmask field, for now ignore range value + if fieldCap.TypeBitmask&eval.BitmaskValueType == eval.BitmaskValueType && value.Type == eval.RangeValueType { + continue + } + + if !fieldCap.TypeMatches(value.Type) { + continue LOOP + } + switch value.Type { - case eval.ScalarValueType, eval.PatternValueType, eval.GlobValueType: - isAnApprover, err := isAnApprover(event, ctx, rule, field, value.Value) + case eval.ScalarValueType, eval.PatternValueType, eval.GlobValueType, eval.RangeValueType: + isAnApprover, approverValueType, approverValue, err := isAnApprover(event, ctx, rule, fieldCap, value.Type, value.Value) if err != nil { return nil, err } - if isAnApprover { - filterValues = filterValues.Merge(FilterValue{Field: field, Value: value.Value, Type: value.Type, Mode: fieldCap.FilterMode}) + filterValue := FilterValue{Field: field, Value: approverValue, Type: approverValueType, Mode: fieldCap.FilterMode} + if !fieldCap.Validate(filterValue) { + continue LOOP + } + filterValues = filterValues.Merge(filterValue) } case eval.BitmaskValueType: bitmasks = append(bitmasks, value.Value.(int)) @@ -128,17 +253,21 @@ func getApprovers(rules []*Rule, event eval.Event, fieldCaps FieldCapabilities) } for _, bitmask := range bitmaskCombinations(bitmasks) { - isAnApprover, err := isAnApprover(event, ctx, rule, field, bitmask) + isAnApprover, _, _, err := isAnApprover(event, ctx, rule, fieldCap, eval.BitmaskValueType, bitmask) if err != nil { return nil, err } if isAnApprover { - filterValues = filterValues.Merge(FilterValue{Field: field, Value: bitmask, Type: eval.BitmaskValueType}) + filterValue := FilterValue{Field: field, Value: bitmask, Type: eval.BitmaskValueType} + if !fieldCap.Validate(filterValue) { + continue LOOP + } + filterValues = filterValues.Merge(filterValue) } } - if len(filterValues) == 0 || !fieldCaps.Validate(filterValues) { + if len(filterValues) == 0 { continue } diff --git a/pkg/security/secl/rules/capabilities.go b/pkg/security/secl/rules/capabilities.go index fc9bea2221953..98948f6f00476 100644 --- a/pkg/security/secl/rules/capabilities.go +++ b/pkg/security/secl/rules/capabilities.go @@ -25,45 +25,40 @@ type 
FieldCapabilities []FieldCapability // FieldCapability represents a field and the type of its value (scalar, pattern, bitmask, ...) type FieldCapability struct { - Field eval.Field - TypeBitmask eval.FieldValueType - ValidateFnc func(FilterValue) bool - FilterWeight int - FilterMode FilterMode + Field eval.Field + TypeBitmask eval.FieldValueType + ValidateFnc func(FilterValue) bool + FilterWeight int + FilterMode FilterMode + RangeFilterValue *RangeFilterValue + HandleNotApproverValue func(valueType eval.FieldValueType, value interface{}) (eval.FieldValueType, interface{}, bool) } -// GetFields returns all the fields of FieldCapabilities -func (fcs FieldCapabilities) GetFields() []eval.Field { - var fields []eval.Field - for _, fc := range fcs { - fields = append(fields, fc.Field) - } - return fields +// TypeMatches return if a type is supported +func (fc FieldCapability) TypeMatches(kind eval.FieldValueType) bool { + return kind&fc.TypeBitmask != 0 } -// Validate ensures all the filter values match field capabilities -func (fcs FieldCapabilities) Validate(filterValues FilterValues) bool { - for _, filterValue := range filterValues { - var found bool - for _, fc := range fcs { - if filterValue.Field != fc.Field || filterValue.Type&fc.TypeBitmask == 0 { - continue - } - - if fc.ValidateFnc != nil { - if !fc.ValidateFnc(filterValue) { - continue - } - } - - found = true - break - } +// Validate validate the filter value +func (fc FieldCapability) Validate(filterValue FilterValue) bool { + if filterValue.Field != fc.Field || !fc.TypeMatches(filterValue.Type) { + return false + } - if !found { + if fc.ValidateFnc != nil { + if !fc.ValidateFnc(filterValue) { return false } } return true } + +// GetFields returns all the fields of FieldCapabilities +func (fcs FieldCapabilities) GetFields() []eval.Field { + var fields []eval.Field + for _, fc := range fcs { + fields = append(fields, fc.Field) + } + return fields +} diff --git a/pkg/security/secl/rules/errors.go b/pkg/security/secl/rules/errors.go index 8440b00eed692..8cbbc71a06ddc 100644 --- a/pkg/security/secl/rules/errors.go +++ b/pkg/security/secl/rules/errors.go @@ -186,3 +186,13 @@ type ErrFieldNotAvailable struct { func (e *ErrFieldNotAvailable) Error() string { return fmt.Sprintf("field `%s` not available for event type `%v`, available for `%v`", e.Field, e.EventType, e.RestrictedTo) } + +// ErrActionNotAvailable is returned when an action is not available +type ErrActionNotAvailable struct { + ActionName string + EventType eval.EventType +} + +func (e *ErrActionNotAvailable) Error() string { + return fmt.Sprintf("action `%s` not available for event type `%v`", e.ActionName, e.EventType) +} diff --git a/pkg/security/secl/rules/filter_values.go b/pkg/security/secl/rules/filter_values.go index 7c0e6fd7a2fa7..58defbc125a3a 100644 --- a/pkg/security/secl/rules/filter_values.go +++ b/pkg/security/secl/rules/filter_values.go @@ -34,3 +34,9 @@ LOOP: return fv } + +// RangeFilterValue defines a range value +type RangeFilterValue struct { + Min int + Max int +} diff --git a/pkg/security/secl/rules/model.go b/pkg/security/secl/rules/model.go index d347ec32a895f..b792eeb292b30 100644 --- a/pkg/security/secl/rules/model.go +++ b/pkg/security/secl/rules/model.go @@ -41,18 +41,18 @@ const ( // OverrideOptions defines combine options type OverrideOptions struct { - Fields []OverrideField `yaml:"fields"` + Fields []OverrideField `yaml:"fields" json:"fields" jsonschema:"enum=all,enum=expression,enum=actions,enum=every,enum=tags"` } // MacroDefinition holds 
the definition of a macro type MacroDefinition struct { - ID MacroID `yaml:"id"` - Expression string `yaml:"expression"` - Description string `yaml:"description"` - AgentVersionConstraint string `yaml:"agent_version"` - Filters []string `yaml:"filters"` - Values []string `yaml:"values"` - Combine CombinePolicy `yaml:"combine"` + ID MacroID `yaml:"id" json:"id"` + Expression string `yaml:"expression" json:"expression,omitempty" jsonschema:"oneof_required=MacroWithExpression"` + Description string `yaml:"description" json:"description,omitempty"` + AgentVersionConstraint string `yaml:"agent_version" json:"agent_version,omitempty"` + Filters []string `yaml:"filters" json:"filters,omitempty"` + Values []string `yaml:"values" json:"values,omitempty" jsonschema:"oneof_required=MacroWithValues"` + Combine CombinePolicy `yaml:"combine" json:"combine,omitempty" jsonschema:"enum=merge,enum=override"` } // RuleID represents the ID of a rule @@ -60,20 +60,21 @@ type RuleID = string // RuleDefinition holds the definition of a rule type RuleDefinition struct { - ID RuleID `yaml:"id"` - Version string `yaml:"version"` - Expression string `yaml:"expression"` - Description string `yaml:"description"` - Tags map[string]string `yaml:"tags"` - AgentVersionConstraint string `yaml:"agent_version"` - Filters []string `yaml:"filters"` - Disabled bool `yaml:"disabled"` - Combine CombinePolicy `yaml:"combine"` - OverrideOptions OverrideOptions `yaml:"override_options"` - Actions []*ActionDefinition `yaml:"actions"` - Every time.Duration `yaml:"every"` - Silent bool `yaml:"silent"` - GroupID string `yaml:"group_id"` + ID RuleID `yaml:"id,omitempty" json:"id"` + Version string `yaml:"version,omitempty" json:"version,omitempty"` + Expression string `yaml:"expression" json:"expression,omitempty"` + Description string `yaml:"description,omitempty" json:"description,omitempty"` + Tags map[string]string `yaml:"tags,omitempty" json:"tags,omitempty"` + AgentVersionConstraint string `yaml:"agent_version,omitempty" json:"agent_version,omitempty"` + Filters []string `yaml:"filters,omitempty" json:"filters,omitempty"` + Disabled bool `yaml:"disabled,omitempty" json:"disabled,omitempty"` + Combine CombinePolicy `yaml:"combine,omitempty" json:"combine,omitempty" jsonschema:"enum=override"` + OverrideOptions OverrideOptions `yaml:"override_options,omitempty" json:"override_options,omitempty"` + Actions []*ActionDefinition `yaml:"actions,omitempty" json:"actions,omitempty"` + Every time.Duration `yaml:"every,omitempty" json:"every,omitempty"` + RateLimiterToken []string `yaml:"limiter_token,omitempty" json:"limiter_token,omitempty"` + Silent bool `yaml:"silent,omitempty" json:"silent,omitempty"` + GroupID string `yaml:"group_id,omitempty" json:"group_id,omitempty"` } // GetTag returns the tag value associated with a tag key @@ -89,17 +90,39 @@ func (rd *RuleDefinition) GetTag(tagKey string) (string, bool) { type ActionName = string const ( - // KillAction name a the kill action + // KillAction name of the kill action KillAction ActionName = "kill" + // SetAction name of the set action + SetAction ActionName = "set" + // CoreDumpAction name of the core dump action + CoreDumpAction ActionName = "coredump" + // HashAction name of the hash action + HashAction ActionName = "hash" ) // ActionDefinition describes a rule action section type ActionDefinition struct { - Filter *string `yaml:"filter"` - Set *SetDefinition `yaml:"set"` - Kill *KillDefinition `yaml:"kill"` - CoreDump *CoreDumpDefinition `yaml:"coredump"` - Hash *HashDefinition 
`yaml:"hash"` + Filter *string `yaml:"filter" json:"filter,omitempty"` + Set *SetDefinition `yaml:"set" json:"set,omitempty" jsonschema:"oneof_required=SetAction"` + Kill *KillDefinition `yaml:"kill" json:"kill,omitempty" jsonschema:"oneof_required=KillAction"` + CoreDump *CoreDumpDefinition `yaml:"coredump" json:"coredump,omitempty" jsonschema:"oneof_required=CoreDumpAction"` + Hash *HashDefinition `yaml:"hash" json:"hash,omitempty" jsonschema:"oneof_required=HashAction"` +} + +// Name returns the name of the action +func (a *ActionDefinition) Name() ActionName { + switch { + case a.Set != nil: + return SetAction + case a.Kill != nil: + return KillAction + case a.CoreDump != nil: + return CoreDumpAction + case a.Hash != nil: + return HashAction + default: + return "" + } } // Scope describes the scope variables @@ -107,27 +130,40 @@ type Scope string // SetDefinition describes the 'set' section of a rule action type SetDefinition struct { - Name string `yaml:"name"` - Value interface{} `yaml:"value"` - Field string `yaml:"field"` - Append bool `yaml:"append"` - Scope Scope `yaml:"scope"` - Size int `yaml:"size"` - TTL time.Duration `yaml:"ttl"` + Name string `yaml:"name" json:"name"` + Value interface{} `yaml:"value" json:"value,omitempty" jsonschema:"oneof_required=SetWithValue"` + Field string `yaml:"field" json:"field,omitempty" jsonschema:"oneof_required=SetWithField"` + Append bool `yaml:"append" json:"append,omitempty"` + Scope Scope `yaml:"scope" json:"scope,omitempty" jsonschema:"enum=process,enum=container"` + Size int `yaml:"size" json:"size,omitempty"` + TTL time.Duration `yaml:"ttl" json:"ttl,omitempty"` +} + +// KillDisarmerParamsDefinition describes the parameters of a kill action disarmer +type KillDisarmerParamsDefinition struct { + MaxAllowed int `yaml:"max_allowed" json:"max_allowed,omitempty" jsonschema:"description=The maximum number of allowed kill actions within the period,example=5"` + Period time.Duration `yaml:"period" json:"period,omitempty" jsonschema:"description=The period of time during which the maximum number of allowed kill actions is calculated,example=1m"` +} + +// KillDisarmerDefinition describes the 'disarmer' section of a kill action +type KillDisarmerDefinition struct { + Container *KillDisarmerParamsDefinition `yaml:"container" json:"container,omitempty"` + Executable *KillDisarmerParamsDefinition `yaml:"executable" json:"executable,omitempty"` } // KillDefinition describes the 'kill' section of a rule action type KillDefinition struct { - Signal string `yaml:"signal"` - Scope string `yaml:"scope"` + Signal string `yaml:"signal" json:"signal" jsonschema:"description=A valid signal name,example=SIGKILL,example=SIGTERM"` + Scope string `yaml:"scope" json:"scope,omitempty" jsonschema:"enum=process,enum=container"` + Disarmer *KillDisarmerDefinition `yaml:"disarmer" json:"disarmer,omitempty"` } // CoreDumpDefinition describes the 'coredump' action type CoreDumpDefinition struct { - Process bool `yaml:"process"` - Mount bool `yaml:"mount"` - Dentry bool `yaml:"dentry"` - NoCompression bool `yaml:"no_compression"` + Process bool `yaml:"process" json:"process,omitempty" jsonschema:"anyof_required=CoreDumpWithProcess"` + Mount bool `yaml:"mount" json:"mount,omitempty" jsonschema:"anyof_required=CoreDumpWithMount"` + Dentry bool `yaml:"dentry" json:"dentry,omitempty" jsonschema:"anyof_required=CoreDumpWithDentry"` + NoCompression bool `yaml:"no_compression" json:"no_compression,omitempty"` } // HashDefinition describes the 'hash' section of a rule action @@ 
-135,21 +171,21 @@ type HashDefinition struct{} // OnDemandHookPoint represents a hook point definition type OnDemandHookPoint struct { - Name string `yaml:"name"` - IsSyscall bool `yaml:"syscall"` - Args []HookPointArg `yaml:"args"` + Name string `yaml:"name" json:"name"` + IsSyscall bool `yaml:"syscall" json:"syscall,omitempty"` + Args []HookPointArg `yaml:"args" json:"args,omitempty"` } // HookPointArg represents the definition of a hook point argument type HookPointArg struct { - N int `yaml:"n"` - Kind string `yaml:"kind"` + N int `yaml:"n" json:"n" jsonschema:"description=Zero-based argument index"` + Kind string `yaml:"kind" json:"kind" jsonschema:"enum=uint,enum=null-terminated-string"` } // PolicyDef represents a policy file definition type PolicyDef struct { - Version string `yaml:"version"` - Macros []*MacroDefinition `yaml:"macros"` - Rules []*RuleDefinition `yaml:"rules"` - OnDemandHookPoints []OnDemandHookPoint `yaml:"hooks"` + Version string `yaml:"version,omitempty" json:"version"` + Macros []*MacroDefinition `yaml:"macros,omitempty" json:"macros,omitempty"` + Rules []*RuleDefinition `yaml:"rules" json:"rules"` + OnDemandHookPoints []OnDemandHookPoint `yaml:"hooks,omitempty" json:"hooks,omitempty"` } diff --git a/pkg/security/secl/rules/opts.go b/pkg/security/secl/rules/opts.go index 91f1c40f42b94..d6aea4a1dacf3 100644 --- a/pkg/security/secl/rules/opts.go +++ b/pkg/security/secl/rules/opts.go @@ -20,6 +20,9 @@ type VariableProvider interface { // VariableProviderFactory describes a function called to instantiate a variable provider type VariableProviderFactory func() VariableProvider +// RuleActionPerformedCb describes the callback function called after a rule action is performed +type RuleActionPerformedCb func(r *Rule, action *ActionDefinition) + // Opts defines rules set options type Opts struct { SupportedDiscarders map[eval.Field]bool @@ -28,6 +31,7 @@ type Opts struct { EventTypeEnabled map[eval.EventType]bool StateScopes map[Scope]VariableProviderFactory Logger log.Logger + ruleActionPerformedCb RuleActionPerformedCb } // WithSupportedDiscarders set supported discarders @@ -66,6 +70,12 @@ func (o *Opts) WithStateScopes(stateScopes map[Scope]VariableProviderFactory) *O return o } +// WithRuleActionPerformedCb sets the rule action performed callback +func (o *Opts) WithRuleActionPerformedCb(cb RuleActionPerformedCb) *Opts { + o.ruleActionPerformedCb = cb + return o +} + // NewRuleOpts returns rule options func NewRuleOpts(eventTypeEnabled map[eval.EventType]bool) *Opts { var ruleOpts Opts diff --git a/pkg/security/secl/rules/policy_loader.go b/pkg/security/secl/rules/policy_loader.go index 07d2b19c44c63..9794781fa2bea 100644 --- a/pkg/security/secl/rules/policy_loader.go +++ b/pkg/security/secl/rules/policy_loader.go @@ -15,9 +15,10 @@ import ( ) const ( - PolicyProviderTypeDir = "file" // PolicyProviderTypeDir defines directory policy provider - PolicyProviderTypeRC = "remote-config" // PolicyProviderTypeRC defines RC policy provider - PolicyProviderTypeBundled = "bundled" // PolicyProviderTypeBundled defines the bundled policy provider + PolicyProviderTypeDir = "file" // PolicyProviderTypeDir defines directory policy provider + PolicyProviderTypeRC = "remote-config" // PolicyProviderTypeRC defines RC policy provider + PolicyProviderTypeBundled = "bundled" // PolicyProviderTypeBundled defines the bundled policy provider + PolicyProviderTypeWorkload = "workload" // PolicyProviderTypeWorkload defines the workload policy provider ) var ( @@ -111,12 +112,13 @@ func (p 
*PolicyLoader) notifyListeners() { // Close stops the loader func (p *PolicyLoader) Close() { - p.RLock() - defer p.RUnlock() + p.Lock() + defer p.Unlock() for _, ch := range p.listeners { close(ch) } + p.listeners = p.listeners[:0] p.debouncer.Stop() } diff --git a/pkg/security/secl/rules/policy_test.go b/pkg/security/secl/rules/policy_test.go index 112001e8aeea3..286b0fc1036da 100644 --- a/pkg/security/secl/rules/policy_test.go +++ b/pkg/security/secl/rules/policy_test.go @@ -10,6 +10,7 @@ package rules import ( "fmt" + "net/http" "os" "path/filepath" "strings" @@ -18,11 +19,14 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/xeipuuv/gojsonschema" "github.com/Masterminds/semver/v3" "github.com/hashicorp/go-multierror" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" + yamlk8s "sigs.k8s.io/yaml" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" @@ -928,3 +932,187 @@ broken }) } } + +// go test -v github.com/DataDog/datadog-agent/pkg/security/secl/rules --run="TestPolicySchema" +func TestPolicySchema(t *testing.T) { + tests := []struct { + name string + policy string + schemaResultCb func(*testing.T, *gojsonschema.Result) + }{ + { + name: "valid", + policy: policyValid, + schemaResultCb: func(t *testing.T, result *gojsonschema.Result) { + if !assert.True(t, result.Valid(), "schema validation failed") { + for _, err := range result.Errors() { + t.Errorf("%s", err) + } + } + }, + }, + { + name: "missing required rule ID", + policy: policyWithMissingRequiredRuleID, + schemaResultCb: func(t *testing.T, result *gojsonschema.Result) { + require.False(t, result.Valid(), "schema validation should fail") + require.Len(t, result.Errors(), 1) + assert.Contains(t, result.Errors()[0].String(), "id is required") + }, + }, + { + name: "unknown field", + policy: policyWithUnknownField, + schemaResultCb: func(t *testing.T, result *gojsonschema.Result) { + require.False(t, result.Valid(), "schema validation should fail") + require.Len(t, result.Errors(), 1) + assert.Contains(t, result.Errors()[0].String(), "Additional property unknown_field is not allowed") + }, + }, + { + name: "invalid field type", + policy: policyWithInvalidFieldType, + schemaResultCb: func(t *testing.T, result *gojsonschema.Result) { + require.False(t, result.Valid(), "schema validation should fail") + require.Len(t, result.Errors(), 1) + assert.Contains(t, result.Errors()[0].String(), "Invalid type") + + }, + }, + { + name: "multiple actions", + policy: policyWithMultipleActions, + schemaResultCb: func(t *testing.T, result *gojsonschema.Result) { + require.False(t, result.Valid(), "schema validation should fail") + require.Len(t, result.Errors(), 1) + assert.Contains(t, result.Errors()[0].String(), "Must validate one and only one schema") + }, + }, + } + + fs := os.DirFS("../../../../pkg/security/tests/schemas") + schemaLoader := gojsonschema.NewReferenceLoaderFileSystem("file:///policy.schema.json", http.FS(fs)) + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + json, err := yamlk8s.YAMLToJSON([]byte(test.policy)) + require.NoErrorf(t, err, "failed to convert yaml to json: %v", err) + documentLoader := gojsonschema.NewBytesLoader(json) + result, err := gojsonschema.Validate(schemaLoader, documentLoader) + require.NoErrorf(t, err, "failed to validate schema: %v", err) + test.schemaResultCb(t, result) + }) + } +} + +const policyValid = ` +version: 1.2.3 +rules: + - id: basic + 
expression: exec.file.name == "foo" + - id: with_tags + description: Rule with tags + expression: exec.file.name == "foo" + tags: + tagA: a + tagB: b + - id: disabled + description: Disabled rule + expression: exec.file.name == "foo" + disabled: true + - id: with_tags + description: Rule with combine + expression: exec.file.name == "bar" + combine: override + override_options: + fields: + - expression + - id: with_filters + description: Rule with a filter and agent_version field + expression: exec.file.name == "foo" + agent_version: ">= 7.38" + filters: + - os == "linux" + - id: with_every_silent_group_id + description: Rule with a silent/every/group_id field + expression: exec.file.name == "foo" + silent: true + every: 10s + group_id: "baz_group" + - id: with_set_action_with_field + description: Rule with a set action using an event field + expression: exec.file.name == "foo" + actions: + - set: + name: process_names + field: process.file.name + append: true + size: 10 + ttl: 10s + - id: with_set_action_with_value + description: Rule with a set action using a value + expression: exec.file.name == "foo" + actions: + - set: + name: global_var_set + value: true + - id: with_set_action_use + description: Rule using a variable set by a previous action + expression: open.file.path == "/tmp/bar" && ${global_var_set} + - id: with_kill_action + description: Rule with a kill action + expression: exec.file.name == "foo" + actions: + - kill: + signal: SIGKILL + scope: process + - id: with_coredump_action + description: Rule with a coredump action + expression: exec.file.name == "foo" + actions: + - coredump: + process: true + dentry: true + mount: true + no_compression: true + - id: with_hash_action + description: Rule with a hash action + expression: exec.file.name == "foo" + actions: + - hash: {} +` +const policyWithMissingRequiredRuleID = ` +version: 1.2.3 +rules: + - description: Rule with missing ID + expression: exec.file.name == "foo" +` + +const policyWithUnknownField = ` +version: 1.2.3 +rules: + - id: rule with unknown field + expression: exec.file.name == "foo" + unknown_field: "bar" +` + +const policyWithInvalidFieldType = ` +version: 1.2.3 +rules: + - id: 2 + expression: exec.file.name == "foo" +` + +const policyWithMultipleActions = ` +version: 1.2.3 +rules: + - id: rule with missing action + expression: exec.file.name == "foo" + actions: + - set: + name: global_var_set + value: true + kill: + signal: SIGKILL + scope: process +` diff --git a/pkg/security/secl/rules/ruleset.go b/pkg/security/secl/rules/ruleset.go index 8b53f15a27992..981109f7ad02e 100644 --- a/pkg/security/secl/rules/ruleset.go +++ b/pkg/security/secl/rules/ruleset.go @@ -284,6 +284,13 @@ func GetRuleEventType(rule *eval.Rule) (eval.EventType, error) { return eventType, nil } +func (rs *RuleSet) isActionAvailable(eventType eval.EventType, action *Action) bool { + if action.Def.Name() == HashAction && eventType != model.FileOpenEventType.String() && eventType != model.ExecEventType.String() { + return false + } + return true +} + // AddRule creates the rule evaluator and adds it to the bucket of its events func (rs *RuleSet) AddRule(parsingContext *ast.ParsingContext, pRule *PolicyRule) (*eval.Rule, error) { if pRule.Def.Disabled { @@ -339,6 +346,10 @@ func (rs *RuleSet) AddRule(parsingContext *ast.ParsingContext, pRule *PolicyRule } for _, action := range rule.PolicyRule.Actions { + if !rs.isActionAvailable(eventType, action) { + return nil, &ErrRuleLoad{Rule: pRule, Err: &ErrActionNotAvailable{ActionName: 
action.Def.Name(), EventType: eventType}} + } + // compile action filter if action.Def.Filter != nil { if err := action.CompileFilter(parsingContext, rs.model, rs.evalOpts); err != nil { @@ -503,14 +514,14 @@ func (rs *RuleSet) IsDiscarder(event eval.Event, field eval.Field) (bool, error) return IsDiscarder(ctx, field, bucket.rules) } -func (rs *RuleSet) runRuleActions(_ eval.Event, ctx *eval.Context, rule *Rule) error { +func (rs *RuleSet) runSetActions(_ eval.Event, ctx *eval.Context, rule *Rule) error { for _, action := range rule.PolicyRule.Actions { if !action.IsAccepted(ctx) { continue } switch { - // action.Kill has to handled by a ruleset listener + // other actions are handled by ruleset listeners case action.Def.Set != nil: name := string(action.Def.Set.Scope) if name != "" { @@ -541,6 +552,11 @@ func (rs *RuleSet) runRuleActions(_ eval.Event, ctx *eval.Context, rule *Rule) e } } } + + if rs.opts.ruleActionPerformedCb != nil { + rs.opts.ruleActionPerformedCb(rule, action.Def) + } + } } @@ -575,8 +591,8 @@ func (rs *RuleSet) Evaluate(event eval.Event) bool { rs.logger.Tracef("Rule `%s` matches with event `%s`\n", rule.ID, event) } - if err := rs.runRuleActions(event, ctx, rule); err != nil { - rs.logger.Errorf("Error while executing rule actions: %s", err) + if err := rs.runSetActions(event, ctx, rule); err != nil { + rs.logger.Errorf("Error while executing Set actions: %s", err) } rs.NotifyRuleMatch(rule, event) diff --git a/pkg/security/secl/rules/ruleset_test.go b/pkg/security/secl/rules/ruleset_test.go index 520d2ca426cdc..8221163152fd4 100644 --- a/pkg/security/secl/rules/ruleset_test.go +++ b/pkg/security/secl/rules/ruleset_test.go @@ -9,6 +9,7 @@ package rules import ( + "math" "reflect" "strings" "syscall" @@ -419,7 +420,7 @@ func TestRuleSetApprovers9(t *testing.T) { caps := FieldCapabilities{ { Field: "open.flags", - TypeBitmask: eval.ScalarValueType, + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, }, { Field: "open.file.path", @@ -745,6 +746,323 @@ func TestRuleSetApprovers20(t *testing.T) { } } +func TestRuleSetApprovers21(t *testing.T) { + exprs := []string{ + `open.flags&1 > 0 || open.flags&2 > 0`, + } + + rs := newRuleSet() + AddTestRuleExpr(t, rs, exprs...) + + caps := FieldCapabilities{ + { + Field: "open.flags", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + FilterWeight: 3, + }, + } + + approvers, _ := rs.GetEventTypeApprovers("open", caps) + if len(approvers) != 1 { + t.Fatalf("should get approvers: %v", approvers) + } +} + +func TestRuleSetApprovers22(t *testing.T) { + exprs := []string{ + `open.flags&1 > 0 || open.flags > 0`, + } + + rs := newRuleSet() + AddTestRuleExpr(t, rs, exprs...) + + caps := FieldCapabilities{ + { + Field: "open.flags", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + FilterWeight: 3, + }, + } + + approvers, _ := rs.GetEventTypeApprovers("open", caps) + if len(approvers) != 0 { + t.Fatalf("shouldn't get approvers: %v", approvers) + } +} + +func TestRuleSetApprovers23(t *testing.T) { + exprs := []string{ + `open.flags&1 > 0 && open.flags > 0`, + } + + rs := newRuleSet() + AddTestRuleExpr(t, rs, exprs...) 
+ + caps := FieldCapabilities{ + { + Field: "open.flags", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + FilterWeight: 3, + }, + } + + approvers, _ := rs.GetEventTypeApprovers("open", caps) + if len(approvers) != 1 { + t.Fatalf("should get approvers: %v", approvers) + } +} + +func TestRuleSetApprovers24(t *testing.T) { + exprs := []string{ + `open.flags&1 > 0 && open.flags > 2`, + } + + rs := newRuleSet() + AddTestRuleExpr(t, rs, exprs...) + + caps := FieldCapabilities{ + { + Field: "open.flags", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + FilterWeight: 3, + }, + } + + approvers, _ := rs.GetEventTypeApprovers("open", caps) + if len(approvers) != 0 { + t.Fatalf("shouldn't get approvers: %v", approvers) + } +} + +func TestRuleSetApprovers25(t *testing.T) { + exprs := []string{ + `open.flags&(O_CREAT|O_WRONLY) == (O_CREAT|O_WRONLY)`, + } + + rs := newRuleSet() + AddTestRuleExpr(t, rs, exprs...) + + caps := FieldCapabilities{ + { + Field: "open.flags", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + FilterWeight: 3, + }, + } + + approvers, _ := rs.GetEventTypeApprovers("open", caps) + if len(approvers) != 1 { + t.Fatalf("should get approvers: %v", approvers) + } +} + +func TestRuleSetApprovers26(t *testing.T) { + exprs := []string{ + `open.file.path in [~"/proc/*/mem"] && open.file.path not in ["/proc/${process.pid}/mem", "/proc/self/mem"]`, + } + + rs := newRuleSet() + AddTestRuleExpr(t, rs, exprs...) + + caps := FieldCapabilities{ + { + Field: "open.file.path", + TypeBitmask: eval.ScalarValueType | eval.GlobValueType, + FilterWeight: 3, + }, + } + + approvers, _ := rs.GetEventTypeApprovers("open", caps) + if len(approvers) != 1 { + t.Fatalf("should get approvers: %v", approvers) + } +} + +func TestRuleSetAUDApprovers(t *testing.T) { + caps := FieldCapabilities{ + { + Field: "open.file.path", + TypeBitmask: eval.ScalarValueType | eval.PatternValueType, + }, + { + Field: "open.flags", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + }, + { + Field: "process.auid", + TypeBitmask: eval.ScalarValueType | eval.RangeValueType, + FilterMode: ApproverOnlyMode, + RangeFilterValue: &RangeFilterValue{Min: 0, Max: model.AuditUIDUnset - 1}, + FilterWeight: 10, + HandleNotApproverValue: func(fieldValueType eval.FieldValueType, value interface{}) (eval.FieldValueType, interface{}, bool) { + if fieldValueType != eval.ScalarValueType { + return fieldValueType, value, false + } + + if i, ok := value.(int); ok && uint32(i) == model.AuditUIDUnset { + return eval.RangeValueType, RangeFilterValue{Min: 0, Max: model.AuditUIDUnset - 1}, true + } + + return fieldValueType, value, false + }, + }, + } + + getApprovers := func(exprs []string) Approvers { + handler := &testHandler{ + filters: make(map[string]testFieldValues), + } + + rs := newRuleSet() + rs.AddListener(handler) + + AddTestRuleExpr(t, rs, exprs...) 
+ + approvers, _ := rs.GetEventTypeApprovers("open", caps) + return approvers + } + + t.Run("equal", func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid == 1000`, + } + + approvers := getApprovers(exprs) + if len(approvers) != 1 || len(approvers["process.auid"]) != 1 || approvers["process.auid"][0].Value != 1000 { + t.Fatalf("should get an approver`: %v", approvers) + } + }) + + t.Run("not-equal", func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid != 1000`, + } + + approvers := getApprovers(exprs) + if len(approvers) != 0 { + t.Fatalf("shouldn't get an approver`: %v", approvers) + } + }) + + t.Run("not-equal-unset", func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid != AUDIT_AUID_UNSET`, + } + + approvers := getApprovers(exprs) + if len(approvers) != 1 || len(approvers["process.auid"]) != 1 { + t.Fatalf("should get an approver`: %v", approvers) + } + + rge := approvers["process.auid"][0].Value.(RangeFilterValue) + if rge.Min != 0 || rge.Max != model.AuditUIDUnset-1 { + t.Fatalf("unexpected range") + } + }) + + t.Run("lesser-equal-than", func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid <= 1000`, + } + + approvers := getApprovers(exprs) + if len(approvers) != 1 || len(approvers["process.auid"]) != 1 { + t.Fatalf("should get an approver`: %v", approvers) + } + + rge := approvers["process.auid"][0].Value.(RangeFilterValue) + if rge.Min != 0 || rge.Max != 1000 { + t.Fatalf("unexpected range") + } + }) + + t.Run("lesser-than", func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid < 1000`, + } + + approvers := getApprovers(exprs) + if len(approvers) != 1 || len(approvers["process.auid"]) != 1 { + t.Fatalf("should get an approver`: %v", approvers) + } + + rge := approvers["process.auid"][0].Value.(RangeFilterValue) + if rge.Min != 0 || rge.Max != 999 { + t.Fatalf("unexpected range") + } + }) + + t.Run("greater-equal-than", func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid >= 1000`, + } + + approvers := getApprovers(exprs) + if len(approvers) != 1 || len(approvers["process.auid"]) != 1 { + t.Fatalf("should get an approver`: %v", approvers) + } + + rge := approvers["process.auid"][0].Value.(RangeFilterValue) + if rge.Min != 1000 || rge.Max != math.MaxUint32-1 { + t.Fatalf("unexpected range") + } + }) + + t.Run("greater-than", func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid > 1000`, + } + + approvers := getApprovers(exprs) + if len(approvers) != 1 || len(approvers["process.auid"]) != 1 { + t.Fatalf("should get an approver`: %v", approvers) + } + + rge := approvers["process.auid"][0].Value.(RangeFilterValue) + if rge.Min != 1001 || rge.Max != math.MaxUint32-1 { + t.Fatalf("unexpected range") + } + }) + + t.Run("greater-equal-than-and", func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid >= 1000 && process.auid != AUDIT_AUID_UNSET`, + `open.flags&O_WRONLY > 0`, + } + + approvers := getApprovers(exprs) + if len(approvers) != 2 || len(approvers["process.auid"]) != 2 && len(approvers["open.flags"]) != 1 { + t.Fatalf("should get an approver`: %v", approvers) + } + + rge := approvers["process.auid"][0].Value.(RangeFilterValue) + if rge.Min != 1000 || rge.Max != math.MaxUint32-1 { + t.Fatalf("unexpected range") + } + }) + + t.Run("lesser-and-greater", func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid > 1000 && process.auid < 4000`, 
+ } + + approvers := getApprovers(exprs) + if len(approvers) != 1 || len(approvers["process.auid"]) != 2 { + t.Fatalf("should get an approver`: %v", approvers) + } + + rge := approvers["process.auid"][0].Value.(RangeFilterValue) + if rge.Min != 1001 || rge.Max != math.MaxUint32-1 { + t.Fatalf("unexpected range") + } + + rge = approvers["process.auid"][1].Value.(RangeFilterValue) + if rge.Min != 0 || rge.Max != 3999 { + t.Fatalf("unexpected range") + } + }) +} + func TestGetRuleEventType(t *testing.T) { t.Run("ok", func(t *testing.T) { rule := eval.NewRule("aaa", `open.file.name == "test"`, &eval.Opts{}) diff --git a/pkg/security/security_profile/activity_tree/activity_tree.go b/pkg/security/security_profile/activity_tree/activity_tree.go index b4d24ae1b2b9b..e6858b9453bcd 100644 --- a/pkg/security/security_profile/activity_tree/activity_tree.go +++ b/pkg/security/security_profile/activity_tree/activity_tree.go @@ -854,3 +854,82 @@ func (at *ActivityTree) EvictImageTag(imageTag string) { } at.ProcessNodes = newProcessNodes } + +func (at *ActivityTree) visitProcessNode(processNode *ProcessNode, cb func(processNode *ProcessNode)) { + for _, pn := range processNode.Children { + at.visitProcessNode(pn, cb) + } + cb(processNode) +} + +func (at *ActivityTree) visitFileNode(fileNode *FileNode, cb func(fileNode *FileNode)) { + if len(fileNode.Children) == 0 { + cb(fileNode) + return + } + + for _, file := range fileNode.Children { + at.visitFileNode(file, cb) + } +} + +func (at *ActivityTree) visit(cb func(processNode *ProcessNode)) { + for _, pn := range at.ProcessNodes { + at.visitProcessNode(pn, cb) + } +} + +// ExtractPaths returns the exec / fim, exec / parent paths +func (at *ActivityTree) ExtractPaths() (map[string][]string, map[string][]string) { + + fimPathsperExecPath := make(map[string][]string) + execAndParent := make(map[string][]string) + + at.visit(func(processNode *ProcessNode) { + var fimPaths []string + for _, file := range processNode.Files { + at.visitFileNode(file, func(fileNode *FileNode) { + path := fileNode.File.PathnameStr + if len(path) > 0 { + if strings.Contains(path, "*") { + fimPaths = append(fimPaths, `~"`+path+`"`) + } else { + fimPaths = append(fimPaths, `"`+path+`"`) + } + } + }) + } + execPath := fmt.Sprintf("\"%s\"", processNode.Process.FileEvent.PathnameStr) + paths, ok := fimPathsperExecPath[execPath] + if ok { + fimPathsperExecPath[execPath] = append(paths, fimPaths...) 
+ } else { + fimPathsperExecPath[execPath] = fimPaths + } + p, pp := extractExecAndParent(processNode) + tmp, ok := execAndParent[p] + if ok { + execAndParent[p] = append(tmp, pp) + } else { + execAndParent[p] = []string{pp} + } + }) + + return fimPathsperExecPath, execAndParent +} + +// ExtractSyscalls returns the syscalls present in an activity tree +func (at *ActivityTree) ExtractSyscalls(arch string) []string { + var syscalls []string + + at.visit(func(processNode *ProcessNode) { + for _, s := range processNode.Syscalls { + syscallKey := utils.SyscallKey{Arch: arch, ID: s} + syscall, ok := utils.Syscalls[syscallKey] + if ok { + syscalls = append(syscalls, syscall) + } + } + }) + return syscalls +} diff --git a/pkg/security/security_profile/activity_tree/process_node.go b/pkg/security/security_profile/activity_tree/process_node.go index 450774141ee70..a51951c3a7dfd 100644 --- a/pkg/security/security_profile/activity_tree/process_node.go +++ b/pkg/security/security_profile/activity_tree/process_node.go @@ -168,7 +168,6 @@ func (pn *ProcessNode) scrubAndReleaseArgsEnvs(resolver *sprocess.EBPFResolver) // Matches return true if the process fields used to generate the dump are identical with the provided model.Process func (pn *ProcessNode) Matches(entry *model.Process, matchArgs bool, normalize bool) bool { if normalize { - // should convert /var/run/1234/runc.pid + /var/run/54321/runc.pic into /var/run/*/runc.pid match := utils.PathPatternMatch(pn.Process.FileEvent.PathnameStr, entry.FileEvent.PathnameStr, utils.PathPatternMatchOpts{WildcardLimit: 3, PrefixNodeRequired: 1, SuffixNodeRequired: 1, NodeSizeLimit: 8}) if !match { return false diff --git a/pkg/security/security_profile/activity_tree/process_node_snapshot.go b/pkg/security/security_profile/activity_tree/process_node_snapshot.go index e4fce84b39828..cb057962f25c5 100644 --- a/pkg/security/security_profile/activity_tree/process_node_snapshot.go +++ b/pkg/security/security_profile/activity_tree/process_node_snapshot.go @@ -50,8 +50,6 @@ func (pn *ProcessNode) snapshot(owner Owner, stats *Stats, newEvent func() *mode // snapshot files if owner.IsEventTypeValid(model.FileOpenEventType) { pn.snapshotAllFiles(p, stats, newEvent, reducer) - } else { - pn.snapshotMemoryMappedFiles(p, stats, newEvent, reducer) } // snapshot sockets @@ -109,16 +107,6 @@ func (pn *ProcessNode) snapshotAllFiles(p *process.Process, stats *Stats, newEve pn.addFiles(files, stats, newEvent, reducer) } -func (pn *ProcessNode) snapshotMemoryMappedFiles(p *process.Process, stats *Stats, newEvent func() *model.Event, reducer *PathsReducer) { - // list the mmaped files of the process - mmapedFiles, err := getMemoryMappedFiles(p.Pid, pn.Process.FileEvent.PathnameStr) - if err != nil { - seclog.Warnf("error while listing memory maps (pid: %v): %s", p.Pid, err) - } - - pn.addFiles(mmapedFiles, stats, newEvent, reducer) -} - func (pn *ProcessNode) addFiles(files []string, stats *Stats, newEvent func() *model.Event, reducer *PathsReducer) { // list the mmaped files of the process slices.Sort(files) diff --git a/pkg/security/security_profile/activity_tree/utils.go b/pkg/security/security_profile/activity_tree/utils.go index d5ecac9fce466..ecf0c4e8bddfe 100644 --- a/pkg/security/security_profile/activity_tree/utils.go +++ b/pkg/security/security_profile/activity_tree/utils.go @@ -40,3 +40,15 @@ func AppendIfNotPresent(slice []string, toAdd string) ([]string, bool) { } return slice, false } + +func extractExecAndParent(processNode *ProcessNode) (string, string) { + processPath
:= processNode.Process.FileEvent.PathnameStr + + var parentPath string + if parent := processNode.GetParent(); parent != nil { + if parentNode, ok := parent.(*ProcessNode); ok { + parentPath = parentNode.Process.FileEvent.PathnameStr + } + } + return processPath, parentPath +} diff --git a/pkg/security/security_profile/dump/activity_dump.go b/pkg/security/security_profile/dump/activity_dump.go index 6984b01ecac2c..0f345ddf77d31 100644 --- a/pkg/security/security_profile/dump/activity_dump.go +++ b/pkg/security/security_profile/dump/activity_dump.go @@ -31,6 +31,7 @@ import ( cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" stime "github.com/DataDog/datadog-agent/pkg/security/resolvers/time" "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/security/seclog" activity_tree "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree" mtdt "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree/metadata" @@ -102,6 +103,29 @@ type ActivityDumpHeader struct { DNSNames *utils.StringKeys `json:"dns_names"` } +// SECLRuleOpts defines SECL rules options +type SECLRuleOpts struct { + EnableKill bool + AllowList bool + Lineage bool + ImageName string + ImageTag string + Service string + FIM bool +} + +// SeccompProfile represents a Seccomp profile +type SeccompProfile struct { + DefaultAction string `yaml:"defaultAction" json:"defaultAction"` + Syscalls []SyscallPolicy `yaml:"syscalls" json:"syscalls"` +} + +// SyscallPolicy represents the policy in a seccomp profile +type SyscallPolicy struct { + Names []string `yaml:"names" json:"names"` + Action string `yaml:"action" json:"action"` +} + // NewActivityDumpLoadConfig returns a new instance of ActivityDumpLoadConfig func NewActivityDumpLoadConfig(evt []model.EventType, timeout time.Duration, waitListTimeout time.Duration, rate int, start time.Time, resolver *stime.Resolver) *model.ActivityDumpLoadConfig { adlc := &model.ActivityDumpLoadConfig{ @@ -884,3 +908,153 @@ func (ad *ActivityDump) DecodeJSON(reader io.Reader) error { return nil } + +// LoadActivityDumpsFromFiles loads activity dumps from a file or a directory +func LoadActivityDumpsFromFiles(path string) ([]*ActivityDump, error) { + + fileInfo, err := os.Stat(path) + if os.IsNotExist(err) { + return nil, fmt.Errorf("the path %s does not exist", path) + } else if err != nil { + return nil, fmt.Errorf("error checking the path: %s", err) + } + + if fileInfo.IsDir() { + dir, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open directory: %s", err) + } + defer dir.Close() + + // Read the directory contents + files, err := dir.Readdir(-1) + if err != nil { + return nil, fmt.Errorf("failed to read directory: %s", err) + } + + ads := []*ActivityDump{} + for _, file := range files { + ad, err := fileToActivityDump(filepath.Join(path, file.Name())) + if err != nil { + return nil, fmt.Errorf("couldn't decode secdump: %w", err) + } + ads = append(ads, ad) + } + return ads, nil + + } + // It's a file otherwise + ad, err := fileToActivityDump(path) + if err != nil { + return nil, fmt.Errorf("couldn't decode secdump: %w", err) + } + return []*ActivityDump{ad}, nil + +} + +func fileToActivityDump(filepath string) (*ActivityDump, error) { + f, err := os.Open(filepath) + if err != nil { + return nil, fmt.Errorf("couldn't open secdump: %w", err) + } + defer f.Close() + ad := NewEmptyActivityDump(nil) + err =
ad.DecodeProtobuf(f) + if err != nil { + return nil, fmt.Errorf("couldn't decode secdump: %w", err) + } + return ad, nil +} + +// GenerateRules returns rules generated from activity dumps +func GenerateRules(ads []*ActivityDump, opts SECLRuleOpts) []*rules.RuleDefinition { + + var ruleDefs []*rules.RuleDefinition + groupID := getGroupID(opts) + + var execs []string + lineage := make(map[string][]string) + fims := make(map[string][]string) + + for _, ad := range ads { + fimPathsperExecPath, execAndParent := ad.ActivityTree.ExtractPaths() + + for execPath, fimPaths := range fimPathsperExecPath { + execs = append(execs, execPath) + tmp, ok := fims[execPath] + if ok { + fims[execPath] = append(tmp, fimPaths...) + } else { + fims[execPath] = fimPaths + } + } + + for p, pp := range execAndParent { + tmp, ok := lineage[p] + if ok { + lineage[p] = append(tmp, pp...) + } else { + lineage[p] = pp + } + } + } + + // add exec rules + if opts.AllowList { + ruleDefs = append(ruleDefs, addRule(fmt.Sprintf(`exec.file.path not in [%s]`, strings.Join(execs, ", ")), groupID, opts)) + } + + // add fim rules + if opts.FIM { + for exec, paths := range fims { + if len(paths) != 0 { + ruleDefs = append(ruleDefs, addRule(fmt.Sprintf(`open.file.path not in [%s] && process.file.path == %s`, strings.Join(paths, ", "), exec), groupID, opts)) + } + } + } + + // add lineage + if opts.Lineage { + var ( + parentOp = "==" + ctxOp = "!=" + ) + var expressions []string + for p, pp := range lineage { + for _, ppp := range pp { + if ppp == "" { + parentOp = "!=" + ctxOp = "==" + } + expressions = append(expressions, fmt.Sprintf(`exec.file.path == "%s" && process.parent.file.path %s "%s" && process.parent.container.id %s ""`, p, parentOp, ppp, ctxOp)) + } + } + + ruleDefs = append(ruleDefs, addRule(fmt.Sprintf(`!(%s)`, strings.Join(expressions, " || ")), groupID, opts)) + + } + return ruleDefs +} + +// GenerateSeccompProfile returns a seccomp profile +func GenerateSeccompProfile(ads []*ActivityDump) *SeccompProfile { + + sp := &SeccompProfile{ + DefaultAction: "SCMP_ACT_KILL", + Syscalls: []SyscallPolicy{ + { + Action: "SCMP_ACT_ALLOW", + Names: []string{}, + }, + }, + } + + for _, ad := range ads { + syscalls := ad.ActivityTree.ExtractSyscalls(ad.Metadata.Arch) + sp.Syscalls[0].Names = append(sp.Syscalls[0].Names, syscalls...)
+ + } + slices.Sort(sp.Syscalls[0].Names) + sp.Syscalls[0].Names = slices.Compact(sp.Syscalls[0].Names) + return sp +} diff --git a/pkg/security/security_profile/dump/local_storage.go b/pkg/security/security_profile/dump/local_storage.go index a2d3af607eb08..a1fa0c334806d 100644 --- a/pkg/security/security_profile/dump/local_storage.go +++ b/pkg/security/security_profile/dump/local_storage.go @@ -22,10 +22,11 @@ import ( "github.com/hashicorp/golang-lru/v2/simplelru" "go.uber.org/atomic" + "github.com/DataDog/datadog-go/v5/statsd" + "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/metrics" "github.com/DataDog/datadog-agent/pkg/security/seclog" - "github.com/DataDog/datadog-go/v5/statsd" ) type dumpFiles struct { @@ -73,20 +74,20 @@ func NewActivityDumpLocalStorage(cfg *config.Config, m *ActivityDumpManager) (Ac } var err error - adls.localDumps, err = simplelru.NewLRU(cfg.RuntimeSecurity.ActivityDumpLocalStorageMaxDumpsCount, func(_ string, files *[]string) { - if len(*files) == 0 { + adls.localDumps, err = simplelru.NewLRU(cfg.RuntimeSecurity.ActivityDumpLocalStorageMaxDumpsCount, func(_ string, filePaths *[]string) { + if len(*filePaths) == 0 { return } // notify the security profile directory provider that we're about to delete a profile if m.securityProfileManager != nil { - m.securityProfileManager.OnLocalStorageCleanup(*files) + m.securityProfileManager.OnLocalStorageCleanup(*filePaths) } // remove everything - for _, f := range *files { - if err := os.Remove(path.Join(cfg.RuntimeSecurity.ActivityDumpLocalStorageDirectory, f)); err != nil { - seclog.Warnf("Failed to remove dump %s (limit of dumps reach): %v", f, err) + for _, filePath := range *filePaths { + if err := os.Remove(filePath); err != nil { + seclog.Warnf("Failed to remove dump %s (limit of dumps reach): %v", filePath, err) } } @@ -137,11 +138,11 @@ func NewActivityDumpLocalStorage(cfg *config.Config, m *ActivityDumpManager) (Ac if !ok { ad = &dumpFiles{ Name: dumpName, - Files: make([]string, 1), + Files: make([]string, 0, 1), } localDumps[dumpName] = ad } - ad.Files = append(ad.Files, f.Name()) + ad.Files = append(ad.Files, filepath.Join(cfg.RuntimeSecurity.ActivityDumpLocalStorageDirectory, f.Name())) if !ad.MTime.IsZero() && ad.MTime.Before(dumpInfo.ModTime()) { ad.MTime = dumpInfo.ModTime() } @@ -183,36 +184,42 @@ func (storage *ActivityDumpLocalStorage) Persist(request config.StorageRequest, // add the file to the list of local dumps (thus removing one or more files if we reached the limit) if storage.localDumps != nil { - files, ok := storage.localDumps.Get(ad.Metadata.Name) + filePaths, ok := storage.localDumps.Get(ad.Metadata.Name) if !ok { storage.localDumps.Add(ad.Metadata.Name, &[]string{outputPath}) } else { - *files = append(*files, outputPath) + *filePaths = append(*filePaths, outputPath) } } // create output file _ = os.MkdirAll(request.OutputDirectory, 0400) - file, err := os.Create(outputPath) + tmpOutputPath := outputPath + ".tmp" + + file, err := os.Create(tmpOutputPath) if err != nil { - return fmt.Errorf("couldn't persist to file [%s]: %w", outputPath, err) + return fmt.Errorf("couldn't persist to file [%s]: %w", tmpOutputPath, err) } defer file.Close() // set output file access mode - if err = os.Chmod(outputPath, 0400); err != nil { - return fmt.Errorf("couldn't set mod for file [%s]: %w", outputPath, err) + if err := os.Chmod(tmpOutputPath, 0400); err != nil { + return fmt.Errorf("couldn't set mod for file [%s]: %w", tmpOutputPath, err) } // 
persist data to disk - if _, err = file.Write(raw.Bytes()); err != nil { - return fmt.Errorf("couldn't write to file [%s]: %w", outputPath, err) + if _, err := file.Write(raw.Bytes()); err != nil { + return fmt.Errorf("couldn't write to file [%s]: %w", tmpOutputPath, err) } - if err = file.Close(); err != nil { + if err := file.Close(); err != nil { return fmt.Errorf("could not close file [%s]: %w", file.Name(), err) } + if err := os.Rename(tmpOutputPath, outputPath); err != nil { + return fmt.Errorf("could not rename file from [%s] to [%s]: %w", tmpOutputPath, outputPath, err) + } + seclog.Infof("[%s] file for [%s] written at: [%s]", request.Format, ad.GetSelectorStr(), outputPath) return nil } diff --git a/pkg/security/security_profile/dump/manager.go b/pkg/security/security_profile/dump/manager.go index e98d13dcad655..10af2f9d787f7 100644 --- a/pkg/security/security_profile/dump/manager.go +++ b/pkg/security/security_profile/dump/manager.go @@ -25,7 +25,7 @@ import ( "github.com/DataDog/datadog-go/v5/statsd" manager "github.com/DataDog/ebpf-manager" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/ebpf/kernel" @@ -338,7 +338,7 @@ func (adm *ActivityDumpManager) prepareContextTags() { adm.contextTags = append(adm.contextTags, fmt.Sprintf("host:%s", adm.hostname)) // merge tags from config - for _, tag := range configUtils.GetConfiguredTags(coreconfig.Datadog(), true) { + for _, tag := range configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), true) { if strings.HasPrefix(tag, "host") { continue } @@ -432,7 +432,7 @@ func (adm *ActivityDumpManager) HandleCGroupTracingEvent(event *model.CgroupTrac defer adm.Unlock() if len(event.ContainerContext.ContainerID) == 0 { - seclog.Errorf("received a cgroup tracing event with an empty container ID") + seclog.Warnf("received a cgroup tracing event with an empty container ID") return } @@ -690,7 +690,7 @@ func (pces *processCacheEntrySearcher) SearchTracedProcessCacheEntry(entry *mode // compute the list of ancestors, we need to start inserting them from the root ancestors := []*model.ProcessCacheEntry{entry} parent := pces.getNextAncestorBinaryOrArgv0(&entry.ProcessContext) - for parent != nil && pces.ad.MatchesSelector(entry) { + for parent != nil && pces.ad.MatchesSelector(parent) { ancestors = append(ancestors, parent) parent = pces.getNextAncestorBinaryOrArgv0(&parent.ProcessContext) } @@ -700,8 +700,8 @@ func (pces *processCacheEntrySearcher) SearchTracedProcessCacheEntry(entry *mode for _, parent = range ancestors { node, _, err := pces.ad.ActivityTree.CreateProcessNode(parent, imageTag, activity_tree.Snapshot, false, pces.adm.resolvers) if err != nil { - // if one of the parents wasn't inserted, leave now - break + // try to insert the other ancestors as we might find a valid root node in the lineage + continue } if node != nil { // This step is important to populate the kernel space "traced_pids" map. 
Some traced event types use this @@ -776,6 +776,8 @@ func (adm *ActivityDumpManager) SendStats() error { } } + adm.storage.SendTelemetry() + return nil } diff --git a/pkg/security/security_profile/dump/remote_storage.go b/pkg/security/security_profile/dump/remote_storage.go index 9c9c8958eb303..b952f77e3124c 100644 --- a/pkg/security/security_profile/dump/remote_storage.go +++ b/pkg/security/security_profile/dump/remote_storage.go @@ -24,7 +24,7 @@ import ( "github.com/DataDog/datadog-go/v5/statsd" logsconfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/metrics" "github.com/DataDog/datadog-agent/pkg/security/seclog" @@ -51,7 +51,7 @@ func NewActivityDumpRemoteStorage() (ActivityDumpStorage, error) { storage := &ActivityDumpRemoteStorage{ tooLargeEntities: make(map[tooLargeEntityStatsEntry]*atomic.Uint64), client: &http.Client{ - Transport: ddhttputil.CreateHTTPTransport(pkgconfig.Datadog()), + Transport: ddhttputil.CreateHTTPTransport(pkgconfigsetup.Datadog()), }, } diff --git a/pkg/security/security_profile/dump/utils.go b/pkg/security/security_profile/dump/utils.go new file mode 100644 index 0000000000000..52d8c1e7d8433 --- /dev/null +++ b/pkg/security/security_profile/dump/utils.go @@ -0,0 +1,73 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux + +package dump + +import ( + "fmt" + "strings" + + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" + "github.com/google/uuid" +) + +func addRule(expression string, groupID string, opts SECLRuleOpts) *rules.RuleDefinition { + ruleDef := &rules.RuleDefinition{ + Expression: expression, + GroupID: groupID, + ID: strings.Replace(uuid.New().String(), "-", "_", -1), + } + applyContext(ruleDef, opts) + if opts.EnableKill { + applyKillAction(ruleDef) + } + return ruleDef +} + +func applyKillAction(ruleDef *rules.RuleDefinition) { + ruleDef.Actions = []*rules.ActionDefinition{ + { + Kill: &rules.KillDefinition{ + Signal: "SIGKILL", + }, + }, + } +} + +func applyContext(ruleDef *rules.RuleDefinition, opts SECLRuleOpts) { + var context []string + + if opts.ImageName != "" { + context = append(context, fmt.Sprintf(`container.tags == "image_name:%s"`, opts.ImageName)) + } + if opts.ImageTag != "" { + context = append(context, fmt.Sprintf(`container.tags == "image_tag:%s"`, opts.ImageTag)) + } + if opts.Service != "" { + context = append(context, fmt.Sprintf(`process.envp == "DD_SERVICE=%s"`, opts.Service)) + } + + if len(context) == 0 { + return + } + + ruleDef.Expression = fmt.Sprintf("%s && (%s)", ruleDef.Expression, strings.Join(context, " && ")) +} + +func getGroupID(opts SECLRuleOpts) string { + groupID := "rules_" + if len(opts.ImageName) != 0 { + groupID = fmt.Sprintf("%s%s", groupID, opts.ImageName) + } else { + groupID = fmt.Sprintf("%s%s", groupID, strings.Replace(uuid.New().String(), "-", "_", -1)) // It should be unique so that we can target it at least, but ImageName should be always set + } + if len(opts.ImageTag) != 0 { + groupID = fmt.Sprintf("%s_%s", groupID, opts.ImageTag) + } + + return groupID +} diff --git a/pkg/security/security_profile/profile/manager.go 
b/pkg/security/security_profile/profile/manager.go index 79ff0079f94cc..c256ae847b488 100644 --- a/pkg/security/security_profile/profile/manager.go +++ b/pkg/security/security_profile/profile/manager.go @@ -28,7 +28,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/metrics" "github.com/DataDog/datadog-agent/pkg/security/proto/api" - "github.com/DataDog/datadog-agent/pkg/security/rconfig" "github.com/DataDog/datadog-agent/pkg/security/resolvers" "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup" cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" @@ -182,15 +181,6 @@ func NewSecurityProfileManager(config *config.Config, statsdClient statsd.Client m.onLocalStorageCleanup = dirProvider.OnLocalStorageCleanup } - // instantiate remote-config provider - if config.RuntimeSecurity.RemoteConfigurationEnabled && config.RuntimeSecurity.SecurityProfileRCEnabled { - rcProvider, err := rconfig.NewRCProfileProvider() - if err != nil { - return nil, fmt.Errorf("couldn't instantiate a new security profile remote-config provider: %w", err) - } - m.providers = append(m.providers, rcProvider) - } - m.initMetricsMap() // register the manager to the provider(s) @@ -686,6 +676,7 @@ func (m *SecurityProfileManager) persistProfile(profile *SecurityProfile) error filename := profile.Metadata.Name + ".profile" outputPath := path.Join(m.config.RuntimeSecurity.SecurityProfileDir, filename) + tmpOutputPath := outputPath + ".tmp" // create output directory and output file, truncate existing file if a profile already exists err = os.MkdirAll(m.config.RuntimeSecurity.SecurityProfileDir, 0400) @@ -693,20 +684,24 @@ func (m *SecurityProfileManager) persistProfile(profile *SecurityProfile) error return fmt.Errorf("couldn't ensure directory [%s] exists: %w", m.config.RuntimeSecurity.SecurityProfileDir, err) } - file, err := os.OpenFile(outputPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0400) + file, err := os.OpenFile(tmpOutputPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0400) if err != nil { return fmt.Errorf("couldn't persist profile to file [%s]: %w", outputPath, err) } defer file.Close() - if _, err = file.Write(raw); err != nil { - return fmt.Errorf("couldn't write profile to file [%s]: %w", outputPath, err) + if _, err := file.Write(raw); err != nil { + return fmt.Errorf("couldn't write profile to file [%s]: %w", tmpOutputPath, err) } - if err = file.Close(); err != nil { + if err := file.Close(); err != nil { return fmt.Errorf("error trying to close profile file [%s]: %w", file.Name(), err) } + if err := os.Rename(tmpOutputPath, outputPath); err != nil { + return fmt.Errorf("couldn't rename profile file [%s] to [%s]: %w", tmpOutputPath, outputPath, err) + } + seclog.Infof("[profile] file for %s written at: [%s]", profile.selector.String(), outputPath) return nil diff --git a/pkg/security/serializers/helpers.go b/pkg/security/serializers/helpers.go index d4528f46f03ed..7ec853525ab30 100644 --- a/pkg/security/serializers/helpers.go +++ b/pkg/security/serializers/helpers.go @@ -7,17 +7,9 @@ package serializers // nolint: deadcode, unused -func getUint64Pointer(i *uint64) *uint64 { - if *i == 0 { +func createNumPointer[I uint32 | uint64](i I) *I { + if i == 0 { return nil } - return i -} - -// nolint: deadcode, unused -func getUint32Pointer(i *uint32) *uint32 { - if *i == 0 { - return nil - } - return i + return &i } diff --git a/pkg/dynamicinstrumentation/doc.go b/pkg/security/serializers/patcher.go similarity index 
58% rename from pkg/dynamicinstrumentation/doc.go rename to pkg/security/serializers/patcher.go index 026de960a20b0..80c5a8dce8a7f 100644 --- a/pkg/dynamicinstrumentation/doc.go +++ b/pkg/security/serializers/patcher.go @@ -3,6 +3,9 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -// Package dynamicinstrumentation encapsulates a system-probe module which uses uprobes and bpf -// to exfiltrate data from running processes -package dynamicinstrumentation +package serializers + +// EventSerializerPatcher defines an event serializer patcher +type EventSerializerPatcher interface { + PatchEvent(*EventSerializer) +} diff --git a/pkg/security/serializers/serializers_linux.go b/pkg/security/serializers/serializers_linux.go index 2c855e9c34625..15e9b7bb701c9 100644 --- a/pkg/security/serializers/serializers_linux.go +++ b/pkg/security/serializers/serializers_linux.go @@ -658,15 +658,14 @@ func newFileSerializer(fe *model.FileEvent, e *model.Event, forceInode ...uint64 inode = forceInode[0] } - mode := uint32(fe.FileFields.Mode) fs := &FileSerializer{ Path: e.FieldHandlers.ResolveFilePath(e, fe), PathResolutionError: fe.GetPathResolutionError(), Name: e.FieldHandlers.ResolveFileBasename(e, fe), - Inode: getUint64Pointer(&inode), - MountID: getUint32Pointer(&fe.MountID), + Inode: createNumPointer(inode), + MountID: createNumPointer(fe.MountID), Filesystem: e.FieldHandlers.ResolveFileFilesystem(e, fe), - Mode: getUint32Pointer(&mode), // only used by open events + Mode: createNumPointer(uint32(fe.FileFields.Mode)), // only used by open events UID: int64(fe.UID), GID: int64(fe.GID), User: e.FieldHandlers.ResolveFileFieldsUser(e, &fe.FileFields), @@ -727,7 +726,7 @@ func newProcessSerializer(ps *model.Process, e *model.Event) *ProcessSerializer Pid: ps.Pid, Tid: ps.Tid, - PPid: getUint32Pointer(&ps.PPid), + PPid: createNumPointer(ps.PPid), Comm: ps.Comm, TTY: ps.TTYName, Executable: newFileSerializer(&ps.FileEvent, e), @@ -946,13 +945,13 @@ func newMountEventSerializer(e *model.Event) *MountEventSerializer { mountSerializer := &MountEventSerializer{ MountPoint: &FileSerializer{ Path: e.GetMountRootPath(), - MountID: &e.Mount.ParentPathKey.MountID, - Inode: &e.Mount.ParentPathKey.Inode, + MountID: createNumPointer(e.Mount.ParentPathKey.MountID), + Inode: createNumPointer(e.Mount.ParentPathKey.Inode), }, Root: &FileSerializer{ Path: e.GetMountMountpointPath(), - MountID: &e.Mount.RootPathKey.MountID, - Inode: &e.Mount.RootPathKey.Inode, + MountID: createNumPointer(e.Mount.RootPathKey.MountID), + Inode: createNumPointer(e.Mount.RootPathKey.Inode), }, MountID: e.Mount.MountID, ParentMountID: e.Mount.ParentPathKey.MountID, @@ -1174,7 +1173,7 @@ func NewEventSerializer(event *model.Event, opts *eval.Opts) *EventSerializer { s.FileEventSerializer = &FileEventSerializer{ FileSerializer: *newFileSerializer(&event.Chmod.File, event), Destination: &FileSerializer{ - Mode: &event.Chmod.Mode, + Mode: createNumPointer(event.Chmod.Mode), }, } s.EventContextSerializer.Outcome = serializeOutcome(event.Chmod.Retval) @@ -1210,7 +1209,7 @@ func NewEventSerializer(event *model.Event, opts *eval.Opts) *EventSerializer { if event.Open.Flags&syscall.O_CREAT > 0 { s.FileEventSerializer.Destination = &FileSerializer{ - Mode: &event.Open.Mode, + Mode: createNumPointer(event.Open.Mode), } } @@ -1223,7 +1222,7 @@ func NewEventSerializer(event *model.Event, opts *eval.Opts) *EventSerializer { s.FileEventSerializer = &FileEventSerializer{ FileSerializer: 
*newFileSerializer(&event.Mkdir.File, event), Destination: &FileSerializer{ - Mode: &event.Mkdir.Mode, + Mode: createNumPointer(event.Mkdir.Mode), }, } s.EventContextSerializer.Outcome = serializeOutcome(event.Mkdir.Retval) diff --git a/pkg/security/serializers/serializers_windows.go b/pkg/security/serializers/serializers_windows.go index f296ee9d8a440..b40b90b610284 100644 --- a/pkg/security/serializers/serializers_windows.go +++ b/pkg/security/serializers/serializers_windows.go @@ -152,7 +152,7 @@ func newProcessSerializer(ps *model.Process, e *model.Event) *ProcessSerializer ExitTime: utils.NewEasyjsonTimeIfNotZero(ps.ExitTime), Pid: ps.Pid, - PPid: getUint32Pointer(&ps.PPid), + PPid: createNumPointer(ps.PPid), Executable: newFileSerializer(&ps.FileEvent, e), CmdLine: e.FieldHandlers.ResolveProcessCmdLineScrubbed(e, ps), User: e.FieldHandlers.ResolveUser(e, ps), diff --git a/pkg/security/tests/action_test.go b/pkg/security/tests/action_test.go index 5214d6961619c..07d959e88bec4 100644 --- a/pkg/security/tests/action_test.go +++ b/pkg/security/tests/action_test.go @@ -124,8 +124,8 @@ func TestActionKill(t *testing.T) { validateMessageSchema(t, string(msg.Data)) jsonPathValidation(test, msg.Data, func(testMod *testModule, obj interface{}) { - if _, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal="sigusr2")]`); err != nil { - t.Error(err) + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal == 'SIGUSR2')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) } }) @@ -176,11 +176,11 @@ func TestActionKill(t *testing.T) { validateMessageSchema(t, string(msg.Data)) jsonPathValidation(test, msg.Data, func(testMod *testModule, obj interface{}) { - if _, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal="sigkill")]`); err != nil { - t.Error(err) + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal == 'SIGKILL')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) } - if _, err = jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.exited_at=~/20.*/)]`); err != nil { - t.Error(err) + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.exited_at =~ /20.*/)]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) } }) @@ -325,11 +325,11 @@ func TestActionKillRuleSpecific(t *testing.T) { validateMessageSchema(t, string(msg.Data)) jsonPathValidation(test, msg.Data, func(testMod *testModule, obj interface{}) { - if _, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal="sigkill")]`); err != nil { - t.Error(err) + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal == 'SIGKILL')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) } - if _, err = jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.exited_at=~/20.*/)]`); err != nil { - t.Error(err) + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.exited_at =~ /20.*/)]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) } }) @@ -346,7 +346,7 @@ func TestActionKillRuleSpecific(t *testing.T) { jsonPathValidation(test, msg.Data, func(testMod *testModule, obj interface{}) { if _, err := jsonpath.JsonPathLookup(obj, 
`$.agent.rule_actions`); err == nil { - t.Error(errors.New("unexpected rule action")) + t.Errorf("unexpected rule action %s", string(msg.Data)) } }) @@ -355,70 +355,8 @@ func TestActionKillRuleSpecific(t *testing.T) { assert.NoError(t, err) } -func TestActionKillDisarm(t *testing.T) { - SkipIfNotAvailable(t) - - if testEnvironment == DockerEnvironment { - t.Skip("Skip test spawning docker containers on docker") - } - - if _, err := whichNonFatal("docker"); err != nil { - t.Skip("Skip test where docker is unavailable") - } - - checkKernelCompatibility(t, "bpf_send_signal is not supported on this kernel and agent is running in container mode", func(kv *kernel.Version) bool { - return !kv.SupportBPFSendSignal() && env.IsContainerized() - }) - - ruleDefs := []*rules.RuleDefinition{ - { - ID: "kill_action_disarm_executable", - Expression: `exec.envs in ["TARGETTOKILL"] && container.id == ""`, - Actions: []*rules.ActionDefinition{ - { - Kill: &rules.KillDefinition{ - Signal: "SIGKILL", - }, - }, - }, - }, - { - ID: "kill_action_disarm_container", - Expression: `exec.envs in ["TARGETTOKILL"] && container.id != ""`, - Actions: []*rules.ActionDefinition{ - { - Kill: &rules.KillDefinition{ - Signal: "SIGKILL", - }, - }, - }, - }, - } - - sleep := which(t, "sleep") - const ( - enforcementDisarmerContainerPeriod = 10 * time.Second - enforcementDisarmerExecutablePeriod = 10 * time.Second - ) - - test, err := newTestModule(t, nil, ruleDefs, withStaticOpts(testOpts{ - enforcementDisarmerContainerEnabled: true, - enforcementDisarmerContainerMaxAllowed: 1, - enforcementDisarmerContainerPeriod: enforcementDisarmerContainerPeriod, - enforcementDisarmerExecutableEnabled: true, - enforcementDisarmerExecutableMaxAllowed: 1, - enforcementDisarmerExecutablePeriod: enforcementDisarmerExecutablePeriod, - eventServerRetention: 1 * time.Nanosecond, - })) - if err != nil { - t.Fatal(err) - } - defer test.Close() - - syscallTester, err := loadSyscallTester(t, test, "syscall_tester") - if err != nil { - t.Fatal(err) - } +func testActionKillDisarm(t *testing.T, test *testModule, sleep, syscallTester string, containerPeriod, executablePeriod time.Duration) { + t.Helper() testKillActionSuccess := func(t *testing.T, ruleID string, cmdFunc func(context.Context)) { test.msgSender.flush() @@ -455,11 +393,11 @@ func TestActionKillDisarm(t *testing.T) { validateMessageSchema(t, string(msg.Data)) jsonPathValidation(test, msg.Data, func(_ *testModule, obj interface{}) { - if _, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal="sigkill")]`); err != nil { - t.Error(err) + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal == 'SIGKILL')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) } - if _, err = jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.exited_at=~/20.*/)]`); err != nil { - t.Error(err) + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.exited_at =~ /20.*/)]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) } }) @@ -489,7 +427,7 @@ func TestActionKillDisarm(t *testing.T) { jsonPathValidation(test, msg.Data, func(_ *testModule, obj interface{}) { if _, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions`); err == nil { - t.Error(errors.New("unexpected rule action")) + t.Errorf("unexpected rule action %s", string(msg.Data)) } }) @@ -518,7 +456,7 @@ func TestActionKillDisarm(t *testing.T) { // test 
that the kill action is re-armed after both executable cache entries have expired // sleep for: (TTL + cache flush period + 1s) to ensure the cache is flushed - time.Sleep(enforcementDisarmerExecutablePeriod + 5*time.Second + 1*time.Second) + time.Sleep(executablePeriod + 5*time.Second + 1*time.Second) testKillActionSuccess(t, "kill_action_disarm_executable", func(_ context.Context) { cmd := exec.Command(sleep, "1") cmd.Env = []string{"TARGETTOKILL=1"} @@ -556,10 +494,300 @@ func TestActionKillDisarm(t *testing.T) { // test that the kill action is re-armed after both container cache entries have expired // sleep for: (TTL + cache flush period + 1s) to ensure the cache is flushed - time.Sleep(enforcementDisarmerContainerPeriod + 5*time.Second + 1*time.Second) + time.Sleep(containerPeriod + 5*time.Second + 1*time.Second) testKillActionSuccess(t, "kill_action_disarm_container", func(_ context.Context) { cmd := newDockerInstance.Command("env", []string{"-i", "-", "TARGETTOKILL=1", "sleep", "5"}, []string{}) _ = cmd.Run() }) }) } + +func TestActionKillDisarm(t *testing.T) { + SkipIfNotAvailable(t) + + if testEnvironment == DockerEnvironment { + t.Skip("Skip test spawning docker containers on docker") + } + + if _, err := whichNonFatal("docker"); err != nil { + t.Skip("Skip test where docker is unavailable") + } + + checkKernelCompatibility(t, "bpf_send_signal is not supported on this kernel and agent is running in container mode", func(kv *kernel.Version) bool { + return !kv.SupportBPFSendSignal() && env.IsContainerized() + }) + + sleep := which(t, "sleep") + + const ( + enforcementDisarmerContainerPeriod = 10 * time.Second + enforcementDisarmerExecutablePeriod = 10 * time.Second + ) + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "kill_action_disarm_executable", + Expression: `exec.envs in ["TARGETTOKILL"] && container.id == ""`, + Actions: []*rules.ActionDefinition{ + { + Kill: &rules.KillDefinition{ + Signal: "SIGKILL", + }, + }, + }, + }, + { + ID: "kill_action_disarm_container", + Expression: `exec.envs in ["TARGETTOKILL"] && container.id != ""`, + Actions: []*rules.ActionDefinition{ + { + Kill: &rules.KillDefinition{ + Signal: "SIGKILL", + }, + }, + }, + }, + } + + test, err := newTestModule(t, nil, ruleDefs, withStaticOpts(testOpts{ + enforcementDisarmerContainerEnabled: true, + enforcementDisarmerContainerMaxAllowed: 1, + enforcementDisarmerContainerPeriod: enforcementDisarmerContainerPeriod, + enforcementDisarmerExecutableEnabled: true, + enforcementDisarmerExecutableMaxAllowed: 1, + enforcementDisarmerExecutablePeriod: enforcementDisarmerExecutablePeriod, + eventServerRetention: 1 * time.Nanosecond, + })) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + syscallTester, err := loadSyscallTester(t, test, "syscall_tester") + if err != nil { + t.Fatal(err) + } + + testActionKillDisarm(t, test, sleep, syscallTester, enforcementDisarmerContainerPeriod, enforcementDisarmerExecutablePeriod) +} + +func TestActionKillDisarmFromRule(t *testing.T) { + SkipIfNotAvailable(t) + + if testEnvironment == DockerEnvironment { + t.Skip("Skip test spawning docker containers on docker") + } + + if _, err := whichNonFatal("docker"); err != nil { + t.Skip("Skip test where docker is unavailable") + } + + checkKernelCompatibility(t, "bpf_send_signal is not supported on this kernel and agent is running in container mode", func(kv *kernel.Version) bool { + return !kv.SupportBPFSendSignal() && env.IsContainerized() + }) + + sleep := which(t, "sleep") + + const ( + enforcementDisarmerContainerPeriod 
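The disarm tests above depend on the kill-action disarmer semantics: with maxAllowed set to 1, a second distinct executable (or container) triggering the rule disarms the kill action, and it re-arms once the cached entries age out of the configured period. The following is a toy model of that behaviour for illustration only, not the agent's implementation.

// disarmer_model_sketch.go: illustrative model of "disarm after too many distinct keys in a period".
package main

import (
	"fmt"
	"time"
)

type disarmer struct {
	maxAllowed int
	period     time.Duration
	seen       map[string]time.Time
}

func newDisarmer(maxAllowed int, period time.Duration) *disarmer {
	return &disarmer{maxAllowed: maxAllowed, period: period, seen: map[string]time.Time{}}
}

// armed records the key (an executable path or container ID) and reports whether the kill action
// is still armed; entries older than the period are dropped, which re-arms the action.
func (d *disarmer) armed(key string, now time.Time) bool {
	for k, t := range d.seen {
		if now.Sub(t) > d.period {
			delete(d.seen, k)
		}
	}
	d.seen[key] = now
	return len(d.seen) <= d.maxAllowed
}

func main() {
	d := newDisarmer(1, 10*time.Second)
	now := time.Now()
	fmt.Println(d.armed("/usr/bin/sleep", now))                    // true: one executable allowed
	fmt.Println(d.armed("/usr/bin/date", now))                     // false: second executable disarms
	fmt.Println(d.armed("/usr/bin/date", now.Add(16*time.Second))) // true: entries expired, re-armed
}
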
= 10 * time.Second + enforcementDisarmerExecutablePeriod = 10 * time.Second + ) + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "kill_action_disarm_executable", + Expression: `exec.envs in ["TARGETTOKILL"] && container.id == ""`, + Actions: []*rules.ActionDefinition{ + { + Kill: &rules.KillDefinition{ + Signal: "SIGKILL", + Disarmer: &rules.KillDisarmerDefinition{ + Executable: &rules.KillDisarmerParamsDefinition{ + MaxAllowed: 1, + Period: enforcementDisarmerExecutablePeriod, + }, + Container: &rules.KillDisarmerParamsDefinition{ + MaxAllowed: 1, + Period: enforcementDisarmerContainerPeriod, + }, + }, + }, + }, + }, + }, + { + ID: "kill_action_disarm_container", + Expression: `exec.envs in ["TARGETTOKILL"] && container.id != ""`, + Actions: []*rules.ActionDefinition{ + { + Kill: &rules.KillDefinition{ + Signal: "SIGKILL", + Disarmer: &rules.KillDisarmerDefinition{ + Executable: &rules.KillDisarmerParamsDefinition{ + MaxAllowed: 1, + Period: enforcementDisarmerExecutablePeriod, + }, + Container: &rules.KillDisarmerParamsDefinition{ + MaxAllowed: 1, + Period: enforcementDisarmerContainerPeriod, + }, + }, + }, + }, + }, + }, + } + + test, err := newTestModule(t, nil, ruleDefs, withStaticOpts(testOpts{ + enforcementDisarmerContainerEnabled: true, + enforcementDisarmerContainerMaxAllowed: 9999, + enforcementDisarmerContainerPeriod: 1 * time.Hour, + enforcementDisarmerExecutableEnabled: true, + enforcementDisarmerExecutableMaxAllowed: 9999, + enforcementDisarmerExecutablePeriod: 1 * time.Hour, + eventServerRetention: 1 * time.Nanosecond, + })) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + syscallTester, err := loadSyscallTester(t, test, "syscall_tester") + if err != nil { + t.Fatal(err) + } + + testActionKillDisarm(t, test, sleep, syscallTester, enforcementDisarmerContainerPeriod, enforcementDisarmerExecutablePeriod) +} + +func TestActionHash(t *testing.T) { + SkipIfNotAvailable(t) + + if testEnvironment == DockerEnvironment { + t.Skip("skipping in docker, not sharing the same pid ns and doesn't have a container ID") + } + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "hash_action", + Expression: `open.file.path == "{{.Root}}/test-hash-action" && open.flags&O_CREAT == O_CREAT`, + Actions: []*rules.ActionDefinition{ + { + Hash: &rules.HashDefinition{}, + }, + }, + }, + } + + test, err := newTestModule(t, nil, ruleDefs) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + testFile, _, err := test.Path("test-hash-action") + if err != nil { + t.Fatal(err) + } + syscallTester, err := loadSyscallTester(t, test, "syscall_tester") + if err != nil { + t.Fatal(err) + } + + done := make(chan bool, 10) + + t.Run("open-process-exit", func(t *testing.T) { + test.msgSender.flush() + test.WaitSignal(t, func() error { + go func() { + timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + if err := runSyscallTesterFunc( + timeoutCtx, t, syscallTester, + "slow-write", "2", testFile, "aaa", + ); err != nil { + t.Error(err) + } + + done <- true + }() + return nil + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "hash_action") + }) + + err = retry.Do(func() error { + msg := test.msgSender.getMsg("hash_action") + if msg == nil { + return errors.New("not found") + } + validateMessageSchema(t, string(msg.Data)) + + jsonPathValidation(test, msg.Data, func(testMod *testModule, obj interface{}) { + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.state == 'Done')]`); err != nil || el == nil || 
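Because the hash action results arrive asynchronously (on process exit or when the hasher times out), TestActionHash polls the fake message sender with retry.Do from github.com/avast/retry-go/v4. A trimmed-down, runnable version of that polling pattern is sketched below; getMsg is a hypothetical stand-in for test.msgSender.getMsg.

// retry_poll_sketch.go: poll for an asynchronously produced message with fixed-delay retries.
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/avast/retry-go/v4"
)

// getMsg stands in for the fake message sender used by the functional tests.
func getMsg(ruleID string) *string {
	s := `{"agent":{"rule_actions":[{"state":"Done","trigger":"process_exit"}]}}`
	return &s
}

func main() {
	err := retry.Do(func() error {
		msg := getMsg("hash_action")
		if msg == nil {
			return errors.New("not found")
		}
		fmt.Println("got:", *msg)
		return nil
	}, retry.Delay(500*time.Millisecond), retry.Attempts(30), retry.DelayType(retry.FixedDelay))
	fmt.Println("err:", err)
}
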
len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) + } + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.trigger == 'process_exit')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) + } + if el, err := jsonpath.JsonPathLookup(obj, `$.file.hashes`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) + } + }) + + return nil + }, retry.Delay(500*time.Millisecond), retry.Attempts(30), retry.DelayType(retry.FixedDelay)) + assert.NoError(t, err) + + <-done + }) + + t.Run("open-timeout", func(t *testing.T) { + test.msgSender.flush() + test.WaitSignal(t, func() error { + go func() { + timeoutCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + if err := runSyscallTesterFunc( + timeoutCtx, t, syscallTester, + // exceed the file hasher timeout, use fork to force an event that will trigger the flush mechanism + "slow-write", "2", testFile, "aaa", ";", "sleep", "4", ";", "fork", ";", "sleep", "1", + ); err != nil { + t.Error(err) + } + + done <- true + }() + return nil + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "hash_action") + }) + + err = retry.Do(func() error { + msg := test.msgSender.getMsg("hash_action") + if msg == nil { + return errors.New("not found") + } + validateMessageSchema(t, string(msg.Data)) + + jsonPathValidation(test, msg.Data, func(testMod *testModule, obj interface{}) { + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.state == 'Done')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) + } + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.trigger == 'timeout')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) + } + if el, err := jsonpath.JsonPathLookup(obj, `$.file.hashes`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) + } + }) + + return nil + }, retry.Delay(500*time.Millisecond), retry.Attempts(30), retry.DelayType(retry.FixedDelay)) + assert.NoError(t, err) + + <-done + }) +} diff --git a/pkg/security/tests/activity_dumps_loadcontroller_test.go b/pkg/security/tests/activity_dumps_loadcontroller_test.go index af6d7c106d8ca..42a3c5e8fa763 100644 --- a/pkg/security/tests/activity_dumps_loadcontroller_test.go +++ b/pkg/security/tests/activity_dumps_loadcontroller_test.go @@ -12,7 +12,6 @@ import ( "fmt" "os" "path/filepath" - "slices" "testing" "time" @@ -187,15 +186,7 @@ func TestActivityDumpsLoadControllerEventTypes(t *testing.T) { if err != nil { t.Fatal(err) } - activeTypes := make([]model.EventType, len(activeEventTypes)) - for i, eventType := range activeEventTypes { - activeTypes[i] = eventType - } - if !slices.Contains(activeTypes, model.FileOpenEventType) { - // add open to the list of expected event types because mmaped files being present in the dump - activeTypes = append(activeTypes, model.FileOpenEventType) - } - if !isEventTypesStringSlicesEqual(activeTypes, presentEventTypes) { + if !isEventTypesStringSlicesEqual(activeEventTypes, presentEventTypes) { t.Fatalf("Dump's event types don't match: expected[%v] vs observed[%v]", activeEventTypes, presentEventTypes) } dump = nextDump diff --git 
a/pkg/security/tests/cgroup_test.go b/pkg/security/tests/cgroup_test.go index 4a08f47b613cc..8dfa0859f7ce8 100644 --- a/pkg/security/tests/cgroup_test.go +++ b/pkg/security/tests/cgroup_test.go @@ -16,11 +16,12 @@ import ( "syscall" "testing" + "github.com/stretchr/testify/assert" + "github.com/DataDog/datadog-agent/pkg/security/ebpf/kernel" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" - "github.com/stretchr/testify/assert" ) func createCGroup(name string) (string, error) { @@ -96,7 +97,6 @@ func TestCGroup(t *testing.T) { }) t.Run("systemd", func(t *testing.T) { - t.Skip("unstable on some distribution") checkKernelCompatibility(t, "RHEL, SLES and Oracle kernels", func(kv *kernel.Version) bool { // TODO(lebauce): On the systems, systemd service creation doesn't trigger a cprocs write @@ -134,4 +134,42 @@ ExecStart=/usr/bin/touch %s`, testFile2) test.validateOpenSchema(t, event) }) }) + + t.Run("podman", func(t *testing.T) { + checkKernelCompatibility(t, "RHEL, SLES and Oracle kernels", func(kv *kernel.Version) bool { + // TODO(lebauce): On the systems, systemd service creation doesn't trigger a cprocs write + return kv.IsRH7Kernel() || kv.IsOracleUEKKernel() || kv.IsSLESKernel() || kv.IsOpenSUSELeapKernel() + }) + + test.WaitSignal(t, func() error { + serviceUnit := fmt.Sprintf(`[Service] +Type=oneshot +ExecStart=/usr/bin/touch %s`, testFile2) + if err := os.WriteFile("/etc/systemd/system/cws-test.service", []byte(serviceUnit), 0700); err != nil { + return err + } + if err := exec.Command("systemctl", "daemon-reload").Run(); err != nil { + return err + } + if err := exec.Command("systemctl", "start", "cws-test").Run(); err != nil { + return err + } + if err := exec.Command("systemctl", "stop", "cws-test").Run(); err != nil { + return err + } + if err := os.Remove("/etc/systemd/system/cws-test.service"); err != nil { + } + if err := exec.Command("systemctl", "daemon-reload").Run(); err != nil { + return err + } + return nil + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "test_cgroup_systemd") + assertFieldEqual(t, event, "open.file.path", testFile2) + assertFieldEqual(t, event, "cgroup.manager", "systemd") + assertFieldNotEqual(t, event, "cgroup.id", "") + + test.validateOpenSchema(t, event) + }) + }) } diff --git a/pkg/security/tests/cmdwrapper.go b/pkg/security/tests/cmdwrapper.go index ce572e6465311..bfc68552e95df 100644 --- a/pkg/security/tests/cmdwrapper.go +++ b/pkg/security/tests/cmdwrapper.go @@ -23,6 +23,7 @@ const ( noWrapperType wrapperType = "" //nolint:deadcode,unused stdWrapperType wrapperType = "std" dockerWrapperType wrapperType = "docker" + podmanWrapperType wrapperType = "podman" multiWrapperType wrapperType = "multi" ) @@ -151,18 +152,32 @@ func (d *dockerCmdWrapper) selectImageFromLibrary(kind string) error { return err } -func newDockerCmdWrapper(mountSrc, mountDest string, kind string) (*dockerCmdWrapper, error) { - executable, err := exec.LookPath("docker") +func newDockerCmdWrapper(mountSrc, mountDest string, kind string, runtimeCommand string) (*dockerCmdWrapper, error) { + if runtimeCommand == "" { + runtimeCommand = "docker" + } + + executable, err := exec.LookPath(runtimeCommand) if err != nil { return nil, err } // check docker is available cmd := exec.Command(executable, "version") - if err := cmd.Run(); err != nil { + output, err := cmd.Output() + if err != nil { return nil, err } + for 
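The cgroup test additions drive a short-lived systemd oneshot unit to generate an open event attributed to the systemd cgroup manager. The same write-unit / daemon-reload / start / stop / clean-up sequence, extracted into a standalone sketch with an illustrative unit name and command:

// oneshot_unit_sketch.go: run a throwaway systemd oneshot unit and clean it up afterwards.
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func runOneshotUnit(name, command string) error {
	unitPath := "/etc/systemd/system/" + name + ".service"
	unit := fmt.Sprintf("[Service]\nType=oneshot\nExecStart=%s\n", command)
	if err := os.WriteFile(unitPath, []byte(unit), 0o700); err != nil {
		return err
	}
	defer func() {
		// best-effort cleanup: remove the unit and reload systemd
		_ = os.Remove(unitPath)
		_ = exec.Command("systemctl", "daemon-reload").Run()
	}()
	for _, args := range [][]string{{"daemon-reload"}, {"start", name}, {"stop", name}} {
		if err := exec.Command("systemctl", args...).Run(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	fmt.Println(runOneshotUnit("cws-sketch", "/usr/bin/touch /tmp/cws-sketch-file"))
}
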
_, line := range strings.Split(strings.ToLower(string(output)), "\n") { + splited := strings.SplitN(line, ":", 2) + if splited[0] == "client" && len(splited) > 1 { + if client := strings.TrimSpace(splited[1]); client != "" && !strings.Contains(client, runtimeCommand) { + return nil, fmt.Errorf("client doesn't report as '%s' but as '%s'", runtimeCommand, client) + } + } + } + wrapper := &dockerCmdWrapper{ executable: executable, mountSrc: mountSrc, diff --git a/pkg/security/tests/container_test.go b/pkg/security/tests/container_test.go index 9376008ad4f04..58c0190989ddd 100644 --- a/pkg/security/tests/container_test.go +++ b/pkg/security/tests/container_test.go @@ -9,7 +9,6 @@ package tests import ( - "fmt" "os/exec" "testing" "time" @@ -55,7 +54,7 @@ func TestContainerCreatedAt(t *testing.T) { t.Fatal(err) } - dockerWrapper, err := newDockerCmdWrapper(test.Root(), test.Root(), "ubuntu") + dockerWrapper, err := newDockerCmdWrapper(test.Root(), test.Root(), "ubuntu", "") if err != nil { t.Skip("Skipping created time in containers tests: Docker not available") return @@ -102,7 +101,7 @@ func TestContainerCreatedAt(t *testing.T) { }) } -func TestContainerFlags(t *testing.T) { +func TestContainerFlagsDocker(t *testing.T) { SkipIfNotAvailable(t) ruleDefs := []*rules.RuleDefinition{ @@ -122,9 +121,9 @@ func TestContainerFlags(t *testing.T) { t.Fatal(err) } - dockerWrapper, err := newDockerCmdWrapper(test.Root(), test.Root(), "ubuntu") + dockerWrapper, err := newDockerCmdWrapper(test.Root(), test.Root(), "ubuntu", "") if err != nil { - t.Skip("Skipping created time in containers tests: Docker not available") + t.Skipf("Skipping container test: Docker not available (%s)", err.Error()) return } @@ -149,64 +148,49 @@ func TestContainerFlags(t *testing.T) { }) } -func TestContainerScopedVariable(t *testing.T) { +func TestContainerFlagsPodman(t *testing.T) { SkipIfNotAvailable(t) ruleDefs := []*rules.RuleDefinition{ { - ID: "test_container_set_scoped_variable", - Expression: `open.file.path == "/tmp/test-open"`, - Actions: []*rules.ActionDefinition{{ - Set: &rules.SetDefinition{ - Name: "var1", - Value: true, - Scope: "container", - }, - }}, - }, { - ID: "test_container_check_scoped_variable", - Expression: `open.file.path == "/tmp/test-open-2" && ${container.var1} == true`, + ID: "test_container_flags", + Expression: `container.runtime == "podman" && container.id != "" && open.file.path == "{{.Root}}/test-open" && cgroup.id =~ "*libpod*"`, }, } - test, err := newTestModule(t, nil, ruleDefs) if err != nil { t.Fatal(err) } defer test.Close() - wrapper, err := newDockerCmdWrapper(test.Root(), test.Root(), "alpine") + testFile, _, err := test.Path("test-open") + if err != nil { + t.Fatal(err) + } + + podmanWrapper, err := newDockerCmdWrapper(test.Root(), test.Root(), "ubuntu", string(podmanWrapperType)) if err != nil { - t.Skip("docker no available") + t.Skip("Skipping created time in containers tests: podman not available") return } - if _, err := wrapper.start(); err != nil { + if _, err := podmanWrapper.start(); err != nil { t.Fatal(err) } - defer wrapper.stop() + defer podmanWrapper.stop() - wrapper.RunTest(t, "set-var", func(t *testing.T, kind wrapperType, cmdFunc func(cmd string, args []string, envs []string) *exec.Cmd) { + podmanWrapper.Run(t, "container-runtime", func(t *testing.T, kind wrapperType, cmdFunc func(cmd string, args []string, envs []string) *exec.Cmd) { test.WaitSignal(t, func() error { - cmd := cmdFunc("/bin/touch", []string{"/tmp/test-open"}, nil) - if out, err := 
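newDockerCmdWrapper now takes a runtime command and verifies that the CLI's `version` output actually reports the requested client, since podman ships a docker-compatible CLI and a stray `docker` shim could silently point at the wrong runtime. That check, lifted into a standalone sketch:

// runtime_detect_sketch.go: confirm the "Client:" line of "<runtime> version" mentions the runtime.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func checkContainerRuntime(runtimeCommand string) error {
	executable, err := exec.LookPath(runtimeCommand)
	if err != nil {
		return err
	}
	output, err := exec.Command(executable, "version").Output()
	if err != nil {
		return err
	}
	for _, line := range strings.Split(strings.ToLower(string(output)), "\n") {
		parts := strings.SplitN(line, ":", 2)
		if parts[0] == "client" && len(parts) > 1 {
			if client := strings.TrimSpace(parts[1]); client != "" && !strings.Contains(client, runtimeCommand) {
				return fmt.Errorf("client doesn't report as %q but as %q", runtimeCommand, client)
			}
		}
	}
	return nil
}

func main() {
	fmt.Println(checkContainerRuntime("docker"))
}
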
cmd.CombinedOutput(); err != nil { - return fmt.Errorf("%s: %w", out, err) - } - return nil + cmd := cmdFunc("touch", []string{testFile}, nil) + return cmd.Run() }, func(event *model.Event, rule *rules.Rule) { - assert.Equal(t, "test_container_set_scoped_variable", rule.ID, "wrong rule triggered") - }) - }) + assertTriggeredRule(t, rule, "test_container_flags") + assertFieldEqual(t, event, "open.file.path", testFile) + assertFieldNotEmpty(t, event, "container.id", "container id shouldn't be empty") + assertFieldEqual(t, event, "container.runtime", "podman") + assert.Equal(t, containerutils.CGroupFlags(containerutils.CGroupManagerPodman), event.CGroupContext.CGroupFlags) - wrapper.RunTest(t, "check-var", func(t *testing.T, kind wrapperType, cmdFunc func(cmd string, args []string, envs []string) *exec.Cmd) { - test.WaitSignal(t, func() error { - cmd := cmdFunc("/bin/touch", []string{"/tmp/test-open-2"}, nil) - if out, err := cmd.CombinedOutput(); err != nil { - return fmt.Errorf("%s: %w", out, err) - } - return nil - }, func(event *model.Event, rule *rules.Rule) { - assert.Equal(t, "test_container_check_scoped_variable", rule.ID, "wrong rule triggered") + test.validateOpenSchema(t, event) }) }) } diff --git a/pkg/security/tests/event_test.go b/pkg/security/tests/event_test.go new file mode 100644 index 0000000000000..bf567e41d29d2 --- /dev/null +++ b/pkg/security/tests/event_test.go @@ -0,0 +1,307 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux && functionaltests + +// Package tests holds tests related files +package tests + +import ( + "context" + "os" + "path" + "path/filepath" + "strings" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/pkg/security/events" + "github.com/DataDog/datadog-agent/pkg/security/metrics" + "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" +) + +func TestEventRulesetLoaded(t *testing.T) { + SkipIfNotAvailable(t) + + rule := &rules.RuleDefinition{ + ID: "path_test", + Expression: `open.file.path == "/aaaaaaaaaaaaaaaaaaaaaaaaa" && open.flags & O_CREAT != 0`, + } + + test, err := newTestModule(t, nil, []*rules.RuleDefinition{rule}) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + test.cws.SendStats() + + key := metrics.MetricRuleSetLoaded + assert.NotEmpty(t, test.statsdClient.Get(key)) + assert.NotZero(t, test.statsdClient.Get(key)) + + test.statsdClient.Flush() + + t.Run("ruleset_loaded", func(t *testing.T) { + count := test.statsdClient.Get(key) + assert.Zero(t, count) + + err = test.GetCustomEventSent(t, func() error { + // force a reload + return syscall.Kill(syscall.Getpid(), syscall.SIGHUP) + }, func(rule *rules.Rule, customEvent *events.CustomEvent) bool { + assert.Equal(t, events.RulesetLoadedRuleID, rule.ID, "wrong rule") + + test.cws.SendStats() + + assert.Equal(t, count+1, test.statsdClient.Get(key)) + + return validateRuleSetLoadedSchema(t, customEvent) + }, 20*time.Second, model.CustomRulesetLoadedEventType) + if err != nil { + t.Fatal(err) + } + }) +} + +func TestEventHeartbeatSent(t *testing.T) { + SkipIfNotAvailable(t) + + rule := &rules.RuleDefinition{ + ID: "path_test", + Expression: `open.file.path == "/aaaaaaaaaaaaaaaaaaaaaaaaa" && open.flags & O_CREAT != 0`, + } + + test, err := 
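TestContainerFlagsPodman matches on cgroup.id =~ "*libpod*" because podman-managed containers get cgroup paths containing "libpod", which is what lets the agent report container.runtime as podman. Below is a rough illustration of that mapping; it is a simplification for illustration, not the containerutils code.

// cgroup_runtime_sketch.go: naive cgroup-path to manager mapping, illustrative only.
package main

import (
	"fmt"
	"strings"
)

func cgroupManagerFromPath(cgroupPath string) string {
	switch {
	case strings.Contains(cgroupPath, "libpod"):
		return "podman"
	case strings.Contains(cgroupPath, "docker"):
		return "docker"
	case strings.Contains(cgroupPath, ".service"):
		return "systemd"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(cgroupManagerFromPath("/machine.slice/libpod-0123abcd.scope"))
	fmt.Println(cgroupManagerFromPath("/system.slice/docker-0123abcd.scope"))
}
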
newTestModule(t, nil, []*rules.RuleDefinition{rule}) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + test.cws.SendStats() + + t.Run("heartbeat", func(t *testing.T) { + err = test.GetCustomEventSent(t, func() error { + // force a reload + return syscall.Kill(syscall.Getpid(), syscall.SIGHUP) + }, func(rule *rules.Rule, customEvent *events.CustomEvent) bool { + + isHeartbeatEvent := events.HeartbeatRuleID == rule.ID + + return validateHeartbeatSchema(t, customEvent) && isHeartbeatEvent + }, 80*time.Second, model.CustomHeartbeatEventType) + if err != nil { + t.Fatal(err) + } + }) +} + +func TestEventRaleLimiters(t *testing.T) { + SkipIfNotAvailable(t) + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "test_unique_id", + Expression: `open.file.path == "{{.Root}}/test-unique-id"`, + Every: 5 * time.Second, + RateLimiterToken: []string{"process.file.name"}, + }, + { + ID: "test_std", + Expression: `open.file.path == "{{.Root}}/test-std"`, + Every: 5 * time.Second, + }, + } + + test, err := newTestModule(t, nil, ruleDefs) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + syscallTester, err := loadSyscallTester(t, test, "syscall_tester") + if err != nil { + t.Fatal(err) + } + + t.Run("token", func(t *testing.T) { + testFile, _, err := test.Path("test-unique-id") + if err != nil { + t.Fatal(err) + } + defer os.Remove(testFile) + + err = test.GetEventSent(t, func() error { + f, err := os.OpenFile(testFile, os.O_CREATE, 0) + if err != nil { + t.Fatal(err) + } + return f.Close() + }, func(rule *rules.Rule, event *model.Event) bool { + return true + }, time.Second*3, "test_unique_id") + if err != nil { + t.Error(err) + } + + // open from another process + err = test.GetEventSent(t, func() error { + timeoutCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + return runSyscallTesterFunc( + timeoutCtx, t, syscallTester, + "open", testFile, + ) + }, func(rule *rules.Rule, event *model.Event) bool { + return true + }, time.Second*3, "test_unique_id") + if err != nil { + t.Error(err) + } + + // open from the first process + err = test.GetEventSent(t, func() error { + f, err := os.OpenFile(testFile, os.O_CREATE, 0) + if err != nil { + t.Fatal(err) + } + return f.Close() + }, func(rule *rules.Rule, event *model.Event) bool { + return true + }, time.Second*3, "test_unique_id") + if err == nil { + t.Error("unexpected event") + } + }) + + t.Run("std", func(t *testing.T) { + testFile, _, err := test.Path("test-std") + if err != nil { + t.Fatal(err) + } + defer os.Remove(testFile) + + err = test.GetEventSent(t, func() error { + f, err := os.OpenFile(testFile, os.O_CREATE, 0) + if err != nil { + t.Fatal(err) + } + return f.Close() + }, func(rule *rules.Rule, event *model.Event) bool { + return true + }, time.Second*3, "test_std") + if err != nil { + t.Error(err) + } + + // open from another process + err = test.GetEventSent(t, func() error { + timeoutCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + return runSyscallTesterFunc( + timeoutCtx, t, syscallTester, + "open", testFile, + ) + }, func(rule *rules.Rule, event *model.Event) bool { + return true + }, time.Second*3, "test_std") + if err == nil { + t.Error(err) + } + }) +} + +func truncatedParents(t *testing.T, staticOpts testOpts, dynamicOpts dynamicTestOpts) { + var truncatedParents string + for i := 0; i < model.MaxPathDepth; i++ { + truncatedParents += "a/" + } + + rule := &rules.RuleDefinition{ + ID: "path_test", + // because of the truncated path 
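The new rate-limiter tests distinguish a plain `Every` limit from one keyed by RateLimiterToken: with process.file.name as the token, each distinct process name gets its own budget inside the window, so the open from syscall_tester still produces an event while a repeat from the test binary does not. An illustrative model of that keyed limiting (not the agent's implementation):

// keyed_limiter_sketch.go: one time window per token value instead of a single global window.
package main

import (
	"fmt"
	"time"
)

type keyedLimiter struct {
	every time.Duration
	last  map[string]time.Time
}

func newKeyedLimiter(every time.Duration) *keyedLimiter {
	return &keyedLimiter{every: every, last: map[string]time.Time{}}
}

// allow returns true if this token hasn't emitted an event within the window.
func (l *keyedLimiter) allow(token string, now time.Time) bool {
	if t, ok := l.last[token]; ok && now.Sub(t) < l.every {
		return false
	}
	l.last[token] = now
	return true
}

func main() {
	l := newKeyedLimiter(5 * time.Second)
	now := time.Now()
	fmt.Println(l.allow("testsuite", now))      // true: first event for this token
	fmt.Println(l.allow("syscall_tester", now)) // true: different token, own budget
	fmt.Println(l.allow("testsuite", now))      // false: same token inside the window
}
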
open.file.path will be '/a/a/a/a*' and not '{{.Root}}/a/a/a*' + Expression: `open.file.path =~ "*/a/**" && open.flags & O_CREAT != 0`, + } + + test, err := newTestModule(t, nil, []*rules.RuleDefinition{rule}, withStaticOpts(staticOpts), withDynamicOpts(dynamicOpts)) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + truncatedParentsFile, _, err := test.Path(truncatedParents) + if err != nil { + t.Fatal(err) + } + + if os.MkdirAll(path.Dir(truncatedParentsFile), 0755) != nil { + t.Fatal(err) + } + + // By default, the `t.TempDir` cleanup has a bit of a hard time cleaning up such a deep file + // let's help it by cleaning up most of the directories + defer cleanupABottomUp(truncatedParentsFile) + + err = test.GetCustomEventSent(t, func() error { + f, err := os.OpenFile(truncatedParentsFile, os.O_CREATE, 0755) + if err != nil { + return err + } + return f.Close() + }, func(rule *rules.Rule, customEvent *events.CustomEvent) bool { + assert.Equal(t, events.AbnormalPathRuleID, rule.ID, "wrong rule") + return true + }, getEventTimeout, model.CustomTruncatedParentsEventType) + if err != nil { + t.Fatal(err) + } + + test.WaitSignal(t, func() error { + f, err := os.OpenFile(truncatedParentsFile, os.O_CREATE, 0755) + if err != nil { + return err + } + return f.Close() + }, func(event *model.Event, rule *rules.Rule) { + // check the length of the filepath that triggered the custom event + filepath, err := event.GetFieldValue("open.file.path") + if err == nil { + splittedFilepath := strings.Split(filepath.(string), "/") + for len(splittedFilepath) > 1 && splittedFilepath[0] != "a" { + // Remove the initial "" and all subsequent parents introduced by the mount point, we only want to + // count the "a"s. + splittedFilepath = splittedFilepath[1:] + } + assert.Equal(t, "a", splittedFilepath[0], "invalid path resolution at the left edge") + assert.Equal(t, "a", splittedFilepath[len(splittedFilepath)-1], "invalid path resolution at the right edge") + assert.Equal(t, model.MaxPathDepth, len(splittedFilepath), "invalid path depth") + } + }) +} + +func cleanupABottomUp(path string) { + for filepath.Base(path) == "a" { + os.RemoveAll(path) + path = filepath.Dir(path) + } +} + +func TestEventTruncatedParents(t *testing.T) { + SkipIfNotAvailable(t) + + t.Run("map", func(t *testing.T) { + truncatedParents(t, testOpts{disableERPCDentryResolution: true}, dynamicTestOpts{disableAbnormalPathCheck: true}) + }) + + t.Run("erpc", func(t *testing.T) { + truncatedParents(t, testOpts{disableMapDentryResolution: true}, dynamicTestOpts{disableAbnormalPathCheck: true}) + }) +} diff --git a/pkg/security/tests/file_windows_test.go b/pkg/security/tests/file_windows_test.go index 491fa3ea6cab4..15b7c57e7b0a3 100644 --- a/pkg/security/tests/file_windows_test.go +++ b/pkg/security/tests/file_windows_test.go @@ -223,7 +223,7 @@ func TestWriteFileEventWithCreate(t *testing.T) { } return f.Close() }, test.validateFileEvent(t, noWrapperType, func(event *model.Event, rule *rules.Rule) { - assertFieldEqualCaseInsensitve(t, event, "write.file.name", "test.bad", event, "write.file.name file didn't match") + assertFieldEqualCaseInsensitve(t, event, "write.file.name", "test.bad", "write.file.name file didn't match") })) }) } diff --git a/pkg/security/tests/filters_test.go b/pkg/security/tests/filters_test.go index 74486100f4470..8c4116980a7fe 100644 --- a/pkg/security/tests/filters_test.go +++ b/pkg/security/tests/filters_test.go @@ -18,9 +18,11 @@ import ( "time" "unsafe" + "github.com/avast/retry-go/v4" "github.com/cilium/ebpf" 
"github.com/stretchr/testify/assert" + "github.com/DataDog/datadog-agent/pkg/security/metrics" "github.com/DataDog/datadog-agent/pkg/security/probe" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" @@ -73,7 +75,6 @@ func TestFilterOpenBasenameApprover(t *testing.T) { if err != nil { t.Fatal(err) } - defer os.Remove(testFile1) if err := waitForOpenProbeEvent(test, func() error { @@ -86,12 +87,26 @@ func TestFilterOpenBasenameApprover(t *testing.T) { t.Fatal(err) } - defer os.Remove(testFile2) - testFile2, _, err = test.Path("test-oba-2") if err != nil { t.Fatal(err) } + defer os.Remove(testFile2) + + // stats + err = retry.Do(func() error { + test.eventMonitor.SendStats() + if count := test.statsdClient.Get(metrics.MetricEventApproved + ":approver_type:basename"); count == 0 { + return fmt.Errorf("expected metrics not found: %+v", test.statsdClient.GetByPrefix(metrics.MetricEventApproved)) + } + + if count := test.statsdClient.Get(metrics.MetricEventApproved + ":event_type:open"); count == 0 { + return fmt.Errorf("expected metrics not found: %+v", test.statsdClient.GetByPrefix(metrics.MetricEventApproved)) + } + + return nil + }, retry.Delay(1*time.Second), retry.Attempts(5), retry.DelayType(retry.FixedDelay)) + assert.NoError(t, err) if err := waitForOpenProbeEvent(test, func() error { fd2, err = openTestFile(test, testFile2, syscall.O_CREAT) @@ -121,7 +136,7 @@ func TestFilterOpenLeafDiscarder(t *testing.T) { // a discarder is created). rule := &rules.RuleDefinition{ ID: "test_rule", - Expression: `open.filename =~ "{{.Root}}/no-approver-*" && open.flags & (O_CREAT | O_SYNC) > 0`, + Expression: `open.file.path =~ "{{.Root}}/no-approver-*" && open.flags & (O_CREAT | O_SYNC) > 0`, } test, err := newTestModule(t, nil, []*rules.RuleDefinition{rule}) @@ -326,6 +341,205 @@ func TestFilterOpenGrandParentDiscarder(t *testing.T) { testFilterOpenParentDiscarder(t, "grandparent", "parent") } +func runAUIDTest(t *testing.T, test *testModule, goSyscallTester, auidOK, auidKO string) { + var cmdWrapper *dockerCmdWrapper + cmdWrapper, err := test.StartADocker() + if err != nil { + t.Fatal(err) + } + defer cmdWrapper.stop() + + // reset stats + test.statsdClient.Flush() + + if err := waitForOpenProbeEvent(test, func() error { + args := []string{ + "-login-uid-open-test", + "-login-uid-open-path", "/tmp/test-auid", + "-login-uid-open-uid", auidOK, + } + + cmd := cmdWrapper.Command(goSyscallTester, args, []string{}) + return cmd.Run() + }, "/tmp/test-auid"); err != nil { + t.Fatal(err) + } + + // stats + err = retry.Do(func() error { + test.eventMonitor.SendStats() + if count := test.statsdClient.Get(metrics.MetricEventApproved + ":approver_type:auid"); count == 0 { + return fmt.Errorf("expected metrics not found: %+v", test.statsdClient.GetByPrefix(metrics.MetricEventApproved)) + } + + if count := test.statsdClient.Get(metrics.MetricEventApproved + ":event_type:open"); count == 0 { + return fmt.Errorf("expected metrics not found: %+v", test.statsdClient.GetByPrefix(metrics.MetricEventApproved)) + } + + return nil + }, retry.Delay(1*time.Second), retry.Attempts(5), retry.DelayType(retry.FixedDelay)) + assert.NoError(t, err) + + if err := waitForOpenProbeEvent(test, func() error { + args := []string{ + "-login-uid-open-test", + "-login-uid-open-path", "/tmp/test-auid", + "-login-uid-open-uid", auidKO, + } + + cmd := cmdWrapper.Command(goSyscallTester, args, []string{}) + return cmd.Run() + }, "/tmp/test-auid"); err == nil { + 
t.Fatal("shouldn't get an event") + } +} + +func TestFilterOpenAUIDEqualApprover(t *testing.T) { + SkipIfNotAvailable(t) + + // skip test that are about to be run on docker (to avoid trying spawning docker in docker) + if testEnvironment == DockerEnvironment { + t.Skip("Skip test spawning docker containers on docker") + } + if _, err := whichNonFatal("docker"); err != nil { + t.Skip("Skip test where docker is unavailable") + } + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "test_equal_1", + Expression: `open.file.path =~ "/tmp/test-auid" && process.auid == 1005`, + }, + { + ID: "test_equal_2", + Expression: `open.file.path =~ "/tmp/test-auid" && process.auid == 0`, + }, + { + ID: "test_equal_3", + Expression: `open.file.path =~ "/tmp/test-auid" && process.auid == AUDIT_AUID_UNSET`, + }, + } + + test, err := newTestModule(t, nil, ruleDefs) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + goSyscallTester, err := loadSyscallTester(t, test, "syscall_go_tester") + if err != nil { + t.Fatal(err) + } + + t.Run("equal-fixed-value", func(t *testing.T) { + runAUIDTest(t, test, goSyscallTester, "1005", "6000") + }) + + t.Run("equal-zero", func(t *testing.T) { + runAUIDTest(t, test, goSyscallTester, "0", "6000") + }) + + t.Run("equal-unset", func(t *testing.T) { + runAUIDTest(t, test, goSyscallTester, "-1", "6000") + }) +} + +func TestFilterOpenAUIDLesserApprover(t *testing.T) { + SkipIfNotAvailable(t) + + // skip test that are about to be run on docker (to avoid trying spawning docker in docker) + if testEnvironment == DockerEnvironment { + t.Skip("Skip test spawning docker containers on docker") + } + if _, err := whichNonFatal("docker"); err != nil { + t.Skip("Skip test where docker is unavailable") + } + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "test_range_lesser", + Expression: `open.file.path =~ "/tmp/test-auid" && process.auid < 500`, + }, + } + + test, err := newTestModule(t, nil, ruleDefs) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + goSyscallTester, err := loadSyscallTester(t, test, "syscall_go_tester") + if err != nil { + t.Fatal(err) + } + + runAUIDTest(t, test, goSyscallTester, "450", "605") +} + +func TestFilterOpenAUIDGreaterApprover(t *testing.T) { + SkipIfNotAvailable(t) + + // skip test that are about to be run on docker (to avoid trying spawning docker in docker) + if testEnvironment == DockerEnvironment { + t.Skip("Skip test spawning docker containers on docker") + } + if _, err := whichNonFatal("docker"); err != nil { + t.Skip("Skip test where docker is unavailable") + } + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "test_range_greater", + Expression: `open.file.path =~ "/tmp/test-auid" && process.auid > 1000`, + }, + } + + test, err := newTestModule(t, nil, ruleDefs) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + goSyscallTester, err := loadSyscallTester(t, test, "syscall_go_tester") + if err != nil { + t.Fatal(err) + } + + runAUIDTest(t, test, goSyscallTester, "1500", "605") +} + +func TestFilterOpenAUIDNotEqualUnsetApprover(t *testing.T) { + SkipIfNotAvailable(t) + + // skip test that are about to be run on docker (to avoid trying spawning docker in docker) + if testEnvironment == DockerEnvironment { + t.Skip("Skip test spawning docker containers on docker") + } + if _, err := whichNonFatal("docker"); err != nil { + t.Skip("Skip test where docker is unavailable") + } + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "test_equal_4", + Expression: `open.file.path =~ "/tmp/test-auid" && process.auid != 
AUDIT_AUID_UNSET`, + }, + } + + test, err := newTestModule(t, nil, ruleDefs) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + goSyscallTester, err := loadSyscallTester(t, test, "syscall_go_tester") + if err != nil { + t.Fatal(err) + } + + runAUIDTest(t, test, goSyscallTester, "6000", "-1") +} + func TestFilterDiscarderMask(t *testing.T) { SkipIfNotAvailable(t) @@ -413,7 +627,7 @@ func TestFilterRenameFileDiscarder(t *testing.T) { // a discarder is created). rule := &rules.RuleDefinition{ ID: "test_rule", - Expression: `open.filename =~ "{{.Root}}/a*/test"`, + Expression: `open.file.path =~ "{{.Root}}/a*/test"`, } test, err := newTestModule(t, nil, []*rules.RuleDefinition{rule}) @@ -499,7 +713,7 @@ func TestFilterRenameFolderDiscarder(t *testing.T) { // a discarder is created). rule := &rules.RuleDefinition{ ID: "test_rule", - Expression: `open.filename =~ "{{.Root}}/a*/test"`, + Expression: `open.file.path =~ "{{.Root}}/a*/test"`, } test, err := newTestModule(t, nil, []*rules.RuleDefinition{rule}) @@ -608,6 +822,21 @@ func TestFilterOpenFlagsApprover(t *testing.T) { t.Fatal(err) } + // stats + err = retry.Do(func() error { + test.eventMonitor.SendStats() + if count := test.statsdClient.Get(metrics.MetricEventApproved + ":approver_type:flag"); count == 0 { + return fmt.Errorf("expected metrics not found: %+v", test.statsdClient.GetByPrefix(metrics.MetricEventApproved)) + } + + if count := test.statsdClient.Get(metrics.MetricEventApproved + ":event_type:open"); count == 0 { + return fmt.Errorf("expected metrics not found: %+v", test.statsdClient.GetByPrefix(metrics.MetricEventApproved)) + } + + return nil + }, retry.Delay(1*time.Second), retry.Attempts(5), retry.DelayType(retry.FixedDelay)) + assert.NoError(t, err) + if err := waitForOpenProbeEvent(test, func() error { fd, err = openTestFile(test, testFile, syscall.O_SYNC) if err != nil { @@ -618,6 +847,20 @@ func TestFilterOpenFlagsApprover(t *testing.T) { t.Fatal(err) } + err = retry.Do(func() error { + test.eventMonitor.SendStats() + if count := test.statsdClient.Get(metrics.MetricEventApproved + ":approver_type:flag"); count == 0 { + return fmt.Errorf("expected metrics not found: %+v", test.statsdClient.GetByPrefix(metrics.MetricEventApproved)) + } + + if count := test.statsdClient.Get(metrics.MetricEventApproved + ":event_type:open"); count == 0 { + return fmt.Errorf("expected metrics not found: %+v", test.statsdClient.GetByPrefix(metrics.MetricEventApproved)) + } + + return nil + }, retry.Delay(1*time.Second), retry.Attempts(5), retry.DelayType(retry.FixedDelay)) + assert.NoError(t, err) + if err := waitForOpenProbeEvent(test, func() error { fd, err = openTestFile(test, testFile, syscall.O_RDONLY) if err != nil { @@ -796,3 +1039,52 @@ func TestFilterBpfCmd(t *testing.T) { } } } + +func TestFilterRuntimeDiscarded(t *testing.T) { + SkipIfNotAvailable(t) + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "test_open", + Expression: `open.file.path == "{{.Root}}/no-event"`, + }, + { + ID: "test_unlink", + Expression: `unlink.file.path == "{{.Root}}/no-event"`, + }, + } + + test, err := newTestModule(t, nil, ruleDefs, withStaticOpts(testOpts{discardRuntime: true})) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + testFile, _, err := test.Path("no-event") + if err != nil { + t.Fatal(err) + } + defer os.Remove(testFile) + + // test that we don't receive event from the kernel + if err := waitForOpenProbeEvent(test, func() error { + fd, err := openTestFile(test, testFile, syscall.O_CREAT) + if err != nil { + return err 
+ } + return syscall.Close(fd) + }, testFile); err == nil { + t.Fatal("shouldn't get an event") + } + + // unlink aren't discarded kernel side (inode invalidation) but should be discarded before the rule evaluation + err = test.GetSignal(t, func() error { + return os.Remove(testFile) + }, func(event *model.Event, r *rules.Rule) { + t.Errorf("shouldn't get an event") + }) + + if err == nil { + t.Errorf("shouldn't get an event") + } +} diff --git a/pkg/security/tests/login_uid_test.go b/pkg/security/tests/login_uid_test.go index a71c9e75c526f..5e1e65c9d784f 100644 --- a/pkg/security/tests/login_uid_test.go +++ b/pkg/security/tests/login_uid_test.go @@ -57,8 +57,13 @@ func TestLoginUID(t *testing.T) { t.Run("login-uid-open-test", func(t *testing.T) { test.WaitSignal(t, func() error { - // run the syscall drift test command - cmd := dockerInstance.Command(goSyscallTester, []string{"-login-uid-open-test"}, []string{}) + args := []string{ + "-login-uid-open-test", + "-login-uid-open-path", "/tmp/test-auid", + "-login-uid-open-uid", "1005", + } + + cmd := dockerInstance.Command(goSyscallTester, args, []string{}) _, err = cmd.CombinedOutput() return err }, func(event *model.Event, rule *rules.Rule) { @@ -69,8 +74,12 @@ func TestLoginUID(t *testing.T) { t.Run("login-uid-exec-test", func(t *testing.T) { test.WaitSignal(t, func() error { - // run the syscall drift test command - cmd := dockerInstance.Command(goSyscallTester, []string{"-login-uid-exec-test", "-login-uid-exec-path", goSyscallTester}, []string{}) + args := []string{ + "-login-uid-exec-test", + "-login-uid-exec-path", goSyscallTester, + } + + cmd := dockerInstance.Command(goSyscallTester, args, []string{}) out, err := cmd.CombinedOutput() if err != nil { t.Logf("command exited with an error: out:'%s' err:'%v'", string(out), err) diff --git a/pkg/security/tests/main_linux.go b/pkg/security/tests/main_linux.go index 7de491a609fb9..dd6699444ebd1 100644 --- a/pkg/security/tests/main_linux.go +++ b/pkg/security/tests/main_linux.go @@ -59,6 +59,7 @@ func SkipIfNotAvailable(t *testing.T) { "~TestOpen", "~TestUnlink", "~TestActionKill", + "~TestActionHash", "~TestRmdir", "~TestRename", "~TestMkdir", @@ -101,7 +102,7 @@ func SkipIfNotAvailable(t *testing.T) { "TestLoginUID/login-uid-open-test", "TestLoginUID/login-uid-exec-test", "TestActionKillExcludeBinary", - "TestActionKillDisarm", + "~TestActionKillDisarm", } if disableSeccomp { diff --git a/pkg/security/tests/module_tester.go b/pkg/security/tests/module_tester.go index dcd853df9e258..1b4b9f85399b4 100644 --- a/pkg/security/tests/module_tester.go +++ b/pkg/security/tests/module_tester.go @@ -26,10 +26,9 @@ import ( "time" "unsafe" + "gopkg.in/yaml.v3" + spconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "go.uber.org/fx" emconfig "github.com/DataDog/datadog-agent/pkg/eventmonitor/config" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" @@ -643,7 +642,7 @@ func assertFieldStringArrayIndexedOneOf(tb *testing.T, e *model.Event, field str return false } -func setTestPolicy(dir string, onDemandProbes []rules.OnDemandHookPoint, macros []*rules.MacroDefinition, rules []*rules.RuleDefinition) (string, error) { +func setTestPolicy(dir string, onDemandProbes []rules.OnDemandHookPoint, macroDefs []*rules.MacroDefinition, ruleDefs []*rules.RuleDefinition) (string, error) { testPolicyFile, err := os.Create(path.Join(dir, 
"secagent-policy.policy")) if err != nil { return "", err @@ -654,21 +653,19 @@ func setTestPolicy(dir string, onDemandProbes []rules.OnDemandHookPoint, macros return err } - tmpl, err := template.New("test-policy").Parse(testPolicy) - if err != nil { - return "", fail(err) + policyDef := &rules.PolicyDef{ + Version: "1.2.3", + Macros: macroDefs, + Rules: ruleDefs, + OnDemandHookPoints: onDemandProbes, } - buffer := new(bytes.Buffer) - if err := tmpl.Execute(buffer, map[string]interface{}{ - "OnDemandProbes": onDemandProbes, - "Rules": rules, - "Macros": macros, - }); err != nil { + testPolicy, err := yaml.Marshal(policyDef) + if err != nil { return "", fail(err) } - _, err = testPolicyFile.Write(buffer.Bytes()) + _, err = testPolicyFile.Write(testPolicy) if err != nil { return "", fail(err) } @@ -888,10 +885,3 @@ func jsonPathValidation(testMod *testModule, data []byte, fnc func(testMod *test fnc(testMod, obj) } - -type testModuleFxDeps struct { - fx.In - - Telemetry telemetry.Component - WMeta workloadmeta.Component -} diff --git a/pkg/security/tests/module_tester_linux.go b/pkg/security/tests/module_tester_linux.go index da0946b338ce6..024ee1933ffc0 100644 --- a/pkg/security/tests/module_tester_linux.go +++ b/pkg/security/tests/module_tester_linux.go @@ -34,9 +34,6 @@ import ( "github.com/stretchr/testify/assert" "golang.org/x/sys/unix" - "github.com/DataDog/datadog-agent/comp/core" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - wmmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" "github.com/DataDog/datadog-agent/pkg/eventmonitor" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" @@ -57,7 +54,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/serializers" "github.com/DataDog/datadog-agent/pkg/security/tests/statsdclient" "github.com/DataDog/datadog-agent/pkg/security/utils" - "github.com/DataDog/datadog-agent/pkg/util/fxutil" utilkernel "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -208,61 +204,6 @@ runtime_security_config: period: {{.EnforcementDisarmerExecutablePeriod}} ` -const testPolicy = `--- -version: 1.2.3 - -hooks: -{{range $OnDemandProbe := .OnDemandProbes}} - - name: {{$OnDemandProbe.Name}} - syscall: {{$OnDemandProbe.IsSyscall}} - args: -{{range $Arg := $OnDemandProbe.Args}} - - n: {{$Arg.N}} - kind: {{$Arg.Kind}} -{{end}} -{{end}} - -macros: -{{range $Macro := .Macros}} - - id: {{$Macro.ID}} - expression: >- - {{$Macro.Expression}} -{{end}} - -rules: -{{range $Rule := .Rules}} - - id: {{$Rule.ID}} - version: {{$Rule.Version}} - expression: >- - {{$Rule.Expression}} - disabled: {{$Rule.Disabled}} - tags: -{{- range $Tag, $Val := .Tags}} - {{$Tag}}: {{$Val}} -{{- end}} - actions: -{{- range $Action := .Actions}} -{{- if $Action.Set}} - - set: - name: {{$Action.Set.Name}} - {{- if $Action.Set.Value}} - value: {{$Action.Set.Value}} - {{- else if $Action.Set.Field}} - field: {{$Action.Set.Field}} - {{- end}} - scope: {{$Action.Set.Scope}} - append: {{$Action.Set.Append}} -{{- end}} -{{- if $Action.Kill}} - - kill: - {{- if $Action.Kill.Signal}} - signal: {{$Action.Kill.Signal}} - {{- end}} -{{- end}} -{{- end}} -{{end}} -` - const ( // HostEnvironment for the Host environment HostEnvironment = "host" @@ -713,7 +654,7 @@ func newTestModuleWithOnDemandProbes(t testing.TB, onDemandHooks []rules.OnDeman if testEnvironment == DockerEnvironment || ebpfLessEnabled { cmdWrapper = 
newStdCmdWrapper() } else { - wrapper, err := newDockerCmdWrapper(st.Root(), st.Root(), "ubuntu") + wrapper, err := newDockerCmdWrapper(st.Root(), st.Root(), "ubuntu", "") if err == nil { cmdWrapper = newMultiCmdWrapper(wrapper, newStdCmdWrapper()) } else { @@ -807,12 +748,11 @@ func newTestModuleWithOnDemandProbes(t testing.TB, onDemandHooks []rules.OnDeman emopts.ProbeOpts.TagsResolver = NewFakeResolverDifferentImageNames() } - fxDeps := fxutil.Test[testModuleFxDeps]( - t, - core.MockBundle(), - wmmock.MockModule(workloadmeta.NewParams()), - ) - testMod.eventMonitor, err = eventmonitor.NewEventMonitor(emconfig, secconfig, emopts, fxDeps.WMeta, fxDeps.Telemetry) + if opts.staticOpts.discardRuntime { + emopts.ProbeOpts.DontDiscardRuntime = false + } + + testMod.eventMonitor, err = eventmonitor.NewEventMonitor(emconfig, secconfig, emopts, nil) if err != nil { return nil, err } @@ -822,10 +762,13 @@ func newTestModuleWithOnDemandProbes(t testing.TB, onDemandHooks []rules.OnDeman if !opts.staticOpts.disableRuntimeSecurity { msgSender := newFakeMsgSender(testMod) - cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, fxDeps.WMeta, module.Opts{EventSender: testMod, MsgSender: msgSender}) + cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, nil, module.Opts{EventSender: testMod, MsgSender: msgSender}) if err != nil { return nil, fmt.Errorf("failed to create module: %w", err) } + // disable containers telemetry + cws.PrepareForFunctionalTests() + testMod.cws = cws testMod.ruleEngine = cws.GetRuleEngine() testMod.msgSender = msgSender @@ -1312,7 +1255,7 @@ func DecodeSecurityProfile(path string) (*profile.SecurityProfile, error) { func (tm *testModule) StartADocker() (*dockerCmdWrapper, error) { // we use alpine to use nslookup on some tests, and validate all busybox specificities - docker, err := newDockerCmdWrapper(tm.st.Root(), tm.st.Root(), "alpine") + docker, err := newDockerCmdWrapper(tm.st.Root(), tm.st.Root(), "alpine", "") if err != nil { return nil, err } diff --git a/pkg/security/tests/module_tester_windows.go b/pkg/security/tests/module_tester_windows.go index 972437a8ca435..052350b7be636 100644 --- a/pkg/security/tests/module_tester_windows.go +++ b/pkg/security/tests/module_tester_windows.go @@ -18,9 +18,6 @@ import ( "github.com/hashicorp/go-multierror" - "github.com/DataDog/datadog-agent/comp/core" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - wmmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" "github.com/DataDog/datadog-agent/pkg/eventmonitor" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/events" @@ -31,7 +28,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/security/tests/statsdclient" - "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -40,49 +36,6 @@ var ( testActivityDumpLoadControllerPeriod = time.Second * 10 ) -const testPolicy = `--- -version: 1.2.3 - -macros: -{{range $Macro := .Macros}} - - id: {{$Macro.ID}} - expression: >- - {{$Macro.Expression}} -{{end}} - -rules: -{{range $Rule := .Rules}} - - id: {{$Rule.ID}} - version: {{$Rule.Version}} - expression: >- - {{$Rule.Expression}} - tags: -{{- range $Tag, $Val := .Tags}} - {{$Tag}}: {{$Val}} -{{- end}} - actions: -{{- range $Action := .Actions}} -{{- if $Action.Set}} - - 
set: - name: {{$Action.Set.Name}} - {{- if $Action.Set.Value}} - value: {{$Action.Set.Value}} - {{- else if $Action.Set.Field}} - field: {{$Action.Set.Field}} - {{- end}} - scope: {{$Action.Set.Scope}} - append: {{$Action.Set.Append}} -{{- end}} -{{- if $Action.Kill}} - - kill: - {{- if $Action.Kill.Signal}} - signal: {{$Action.Kill.Signal}} - {{- end}} -{{- end}} -{{- end}} -{{end}} -` - const testConfig = `--- log_level: DEBUG @@ -279,12 +232,7 @@ func newTestModule(t testing.TB, macroDefs []*rules.MacroDefinition, ruleDefs [] StatsdClient: statsdClient, }, } - fxDeps := fxutil.Test[testModuleFxDeps]( - t, - core.MockBundle(), - wmmock.MockModule(workloadmeta.NewParams()), - ) - testMod.eventMonitor, err = eventmonitor.NewEventMonitor(emconfig, secconfig, emopts, fxDeps.WMeta, fxDeps.Telemetry) + testMod.eventMonitor, err = eventmonitor.NewEventMonitor(emconfig, secconfig, emopts, nil) if err != nil { return nil, err } @@ -292,10 +240,12 @@ func newTestModule(t testing.TB, macroDefs []*rules.MacroDefinition, ruleDefs [] var ruleSetloadedErr *multierror.Error if !opts.staticOpts.disableRuntimeSecurity { - cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, fxDeps.WMeta, module.Opts{EventSender: testMod}) + cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, nil, module.Opts{EventSender: testMod}) if err != nil { return nil, fmt.Errorf("failed to create module: %w", err) } + cws.PrepareForFunctionalTests() + testMod.cws = cws testMod.ruleEngine = cws.GetRuleEngine() diff --git a/pkg/security/tests/mount_test.go b/pkg/security/tests/mount_test.go index 16271a985fa4d..4252d8f553cb8 100644 --- a/pkg/security/tests/mount_test.go +++ b/pkg/security/tests/mount_test.go @@ -503,7 +503,7 @@ func TestMountEvent(t *testing.T) { }) const dockerMountDest = "/host_root" - wrapperTruePositive, err := newDockerCmdWrapper("/", dockerMountDest, "alpine") + wrapperTruePositive, err := newDockerCmdWrapper("/", dockerMountDest, "alpine", "") if err != nil { t.Skip("Skipping mounts in containers tests: Docker not available") return @@ -537,7 +537,7 @@ func TestMountEvent(t *testing.T) { if err = os.Mkdir(legitimateSourcePath, 0755); err != nil { t.Fatal(err) } - wrapperFalsePositive, err := newDockerCmdWrapper(legitimateSourcePath, dockerMountDest, "alpine") + wrapperFalsePositive, err := newDockerCmdWrapper(legitimateSourcePath, dockerMountDest, "alpine", "") if err != nil { t.Fatal(err) } diff --git a/pkg/security/tests/probe_monitor_test.go b/pkg/security/tests/probe_monitor_test.go deleted file mode 100644 index b008fad35376e..0000000000000 --- a/pkg/security/tests/probe_monitor_test.go +++ /dev/null @@ -1,192 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -//go:build linux && functionaltests - -// Package tests holds tests related files -package tests - -import ( - "os" - "path" - "path/filepath" - "strings" - "syscall" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/DataDog/datadog-agent/pkg/security/events" - "github.com/DataDog/datadog-agent/pkg/security/metrics" - "github.com/DataDog/datadog-agent/pkg/security/secl/model" - "github.com/DataDog/datadog-agent/pkg/security/secl/rules" -) - -func TestRulesetLoaded(t *testing.T) { - SkipIfNotAvailable(t) - - rule := &rules.RuleDefinition{ - ID: "path_test", - Expression: `open.file.path == "/aaaaaaaaaaaaaaaaaaaaaaaaa" && open.flags & O_CREAT != 0`, - } - - test, err := newTestModule(t, nil, []*rules.RuleDefinition{rule}) - if err != nil { - t.Fatal(err) - } - defer test.Close() - - test.cws.SendStats() - - key := metrics.MetricRuleSetLoaded - assert.NotEmpty(t, test.statsdClient.Get(key)) - assert.NotZero(t, test.statsdClient.Get(key)) - - test.statsdClient.Flush() - - t.Run("ruleset_loaded", func(t *testing.T) { - count := test.statsdClient.Get(key) - assert.Zero(t, count) - - err = test.GetCustomEventSent(t, func() error { - // force a reload - return syscall.Kill(syscall.Getpid(), syscall.SIGHUP) - }, func(rule *rules.Rule, customEvent *events.CustomEvent) bool { - assert.Equal(t, events.RulesetLoadedRuleID, rule.ID, "wrong rule") - - test.cws.SendStats() - - assert.Equal(t, count+1, test.statsdClient.Get(key)) - - return validateRuleSetLoadedSchema(t, customEvent) - }, 20*time.Second, model.CustomRulesetLoadedEventType) - if err != nil { - t.Fatal(err) - } - }) -} - -func TestHeartbeatSent(t *testing.T) { - SkipIfNotAvailable(t) - - rule := &rules.RuleDefinition{ - ID: "path_test", - Expression: `open.file.path == "/aaaaaaaaaaaaaaaaaaaaaaaaa" && open.flags & O_CREAT != 0`, - } - - test, err := newTestModule(t, nil, []*rules.RuleDefinition{rule}) - if err != nil { - t.Fatal(err) - } - defer test.Close() - - test.cws.SendStats() - - t.Run("heartbeat", func(t *testing.T) { - - err = test.GetCustomEventSent(t, func() error { - // force a reload - return syscall.Kill(syscall.Getpid(), syscall.SIGHUP) - }, func(rule *rules.Rule, customEvent *events.CustomEvent) bool { - - isHeartbeatEvent := events.HeartbeatRuleID == rule.ID - - return validateHeartbeatSchema(t, customEvent) && isHeartbeatEvent - }, 80*time.Second, model.CustomHeartbeatEventType) - if err != nil { - t.Fatal(err) - } - }) -} - -func truncatedParents(t *testing.T, staticOpts testOpts, dynamicOpts dynamicTestOpts) { - var truncatedParents string - for i := 0; i < model.MaxPathDepth; i++ { - truncatedParents += "a/" - } - - rule := &rules.RuleDefinition{ - ID: "path_test", - // because of the truncated path open.file.path will be '/a/a/a/a*' and not '{{.Root}}/a/a/a*' - Expression: `open.file.path =~ "*/a/**" && open.flags & O_CREAT != 0`, - } - - test, err := newTestModule(t, nil, []*rules.RuleDefinition{rule}, withStaticOpts(staticOpts), withDynamicOpts(dynamicOpts)) - if err != nil { - t.Fatal(err) - } - defer test.Close() - - truncatedParentsFile, _, err := test.Path(truncatedParents) - if err != nil { - t.Fatal(err) - } - - t.Run("truncated_parents", func(t *testing.T) { - if os.MkdirAll(path.Dir(truncatedParentsFile), 0755) != nil { - t.Fatal(err) - } - - // By default, the `t.TempDir` cleanup has a bit of a hard time cleaning up such a deep file - // let's help it by cleaning up most of the directories - defer cleanupABottomUp(truncatedParentsFile) - - err = test.GetCustomEventSent(t, func() 
error { - f, err := os.OpenFile(truncatedParentsFile, os.O_CREATE, 0755) - if err != nil { - return err - } - return f.Close() - }, func(rule *rules.Rule, customEvent *events.CustomEvent) bool { - assert.Equal(t, events.AbnormalPathRuleID, rule.ID, "wrong rule") - return true - }, getEventTimeout, model.CustomTruncatedParentsEventType) - if err != nil { - t.Fatal(err) - } - - test.WaitSignal(t, func() error { - f, err := os.OpenFile(truncatedParentsFile, os.O_CREATE, 0755) - if err != nil { - return err - } - return f.Close() - }, func(event *model.Event, rule *rules.Rule) { - // check the length of the filepath that triggered the custom event - filepath, err := event.GetFieldValue("open.file.path") - if err == nil { - splittedFilepath := strings.Split(filepath.(string), "/") - for len(splittedFilepath) > 1 && splittedFilepath[0] != "a" { - // Remove the initial "" and all subsequent parents introduced by the mount point, we only want to - // count the "a"s. - splittedFilepath = splittedFilepath[1:] - } - assert.Equal(t, "a", splittedFilepath[0], "invalid path resolution at the left edge") - assert.Equal(t, "a", splittedFilepath[len(splittedFilepath)-1], "invalid path resolution at the right edge") - assert.Equal(t, model.MaxPathDepth, len(splittedFilepath), "invalid path depth") - } - }) - }) -} - -func cleanupABottomUp(path string) { - for filepath.Base(path) == "a" { - os.RemoveAll(path) - path = filepath.Dir(path) - } -} - -func TestTruncatedParentsMap(t *testing.T) { - SkipIfNotAvailable(t) - - truncatedParents(t, testOpts{disableERPCDentryResolution: true}, dynamicTestOpts{disableAbnormalPathCheck: true}) -} - -func TestTruncatedParentsERPC(t *testing.T) { - SkipIfNotAvailable(t) - - truncatedParents(t, testOpts{disableMapDentryResolution: true}, dynamicTestOpts{disableAbnormalPathCheck: true}) -} diff --git a/pkg/security/tests/process_test.go b/pkg/security/tests/process_test.go index 16780b9d80f9f..518a2f929bd6b 100644 --- a/pkg/security/tests/process_test.go +++ b/pkg/security/tests/process_test.go @@ -1475,7 +1475,7 @@ func TestProcessExecExit(t *testing.T) { execPid = event.ProcessContext.Pid if ebpfLessEnabled { - nsID = event.NSID + nsID = event.ProcessContext.NSID } case model.ExitEventType: @@ -1941,7 +1941,7 @@ func TestProcessBusybox(t *testing.T) { } defer test.Close() - wrapper, err := newDockerCmdWrapper(test.Root(), test.Root(), "alpine") + wrapper, err := newDockerCmdWrapper(test.Root(), test.Root(), "alpine", "") if err != nil { t.Skip("docker no available") return diff --git a/pkg/security/tests/rmdir_test.go b/pkg/security/tests/rmdir_test.go index fc309a20e447f..071395b56c01d 100644 --- a/pkg/security/tests/rmdir_test.go +++ b/pkg/security/tests/rmdir_test.go @@ -182,21 +182,26 @@ func TestRmdirInvalidate(t *testing.T) { } defer test.Close() - for i := 0; i != 5; i++ { - testFile, _, err := test.Path(fmt.Sprintf("test-rmdir-%d", i)) - if err != nil { - t.Fatal(err) - } + ifSyscallSupported("SYS_RMDIR", func(t *testing.T, syscallNB uintptr) { + for i := 0; i != 5; i++ { + testFile, testFilePtr, err := test.Path(fmt.Sprintf("test-rmdir-%d", i)) + if err != nil { + t.Fatal(err) + } - if err := syscall.Mkdir(testFile, 0777); err != nil { - t.Fatal(err) - } + if err := syscall.Mkdir(testFile, 0777); err != nil { + t.Fatal(err) + } - test.WaitSignal(t, func() error { - return syscall.Rmdir(testFile) - }, func(event *model.Event, rule *rules.Rule) { - assert.Equal(t, "rmdir", event.GetType(), "wrong event type") - assertFieldEqual(t, event, "rmdir.file.path", testFile) 
- }) - } + test.WaitSignal(t, func() error { + if _, _, errno := syscall.Syscall(syscallNB, uintptr(testFilePtr), 0, 0); errno != 0 { + return error(errno) + } + return nil + }, func(event *model.Event, rule *rules.Rule) { + assert.Equal(t, "rmdir", event.GetType(), "wrong event type") + assertFieldEqual(t, event, "rmdir.file.path", testFile) + }) + } + }) } diff --git a/pkg/security/tests/sbom_test.go b/pkg/security/tests/sbom_test.go index f020ae8688ade..47badc8607bf6 100644 --- a/pkg/security/tests/sbom_test.go +++ b/pkg/security/tests/sbom_test.go @@ -59,7 +59,7 @@ func TestSBOM(t *testing.T) { t.Skip("not supported") } - dockerWrapper, err := newDockerCmdWrapper(test.Root(), test.Root(), "ubuntu") + dockerWrapper, err := newDockerCmdWrapper(test.Root(), test.Root(), "ubuntu", "") if err != nil { t.Skip("Skipping sbom tests: Docker not available") return diff --git a/pkg/security/tests/schemas/agent_context.json b/pkg/security/tests/schemas/agent_context.json index 93cef1abb240e..daf8233065056 100644 --- a/pkg/security/tests/schemas/agent_context.json +++ b/pkg/security/tests/schemas/agent_context.json @@ -33,6 +33,9 @@ "oneOf": [ { "$ref": "/schemas/kill.schema.json" + }, + { + "$ref": "/schemas/hash.schema.json" } ] } diff --git a/pkg/security/tests/schemas/hash.schema.json b/pkg/security/tests/schemas/hash.schema.json new file mode 100644 index 0000000000000..1415e736a96b8 --- /dev/null +++ b/pkg/security/tests/schemas/hash.schema.json @@ -0,0 +1,25 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "kill.json", + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "path": { + "type": "string" + }, + "state": { + "type": "string" + }, + "trigger": { + "type": "string" + } + }, + "required": [ + "type", + "path", + "state", + "trigger" + ] +} \ No newline at end of file diff --git a/pkg/security/tests/schemas/policy.schema.json b/pkg/security/tests/schemas/policy.schema.json new file mode 100644 index 0000000000000..5fceb63011b5b --- /dev/null +++ b/pkg/security/tests/schemas/policy.schema.json @@ -0,0 +1,457 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://github.com/DataDog/datadog-agent/tree/main/pkg/security/secl/rules", + "$defs": { + "ActionDefinition": { + "oneOf": [ + { + "required": [ + "set" + ], + "title": "SetAction" + }, + { + "required": [ + "kill" + ], + "title": "KillAction" + }, + { + "required": [ + "coredump" + ], + "title": "CoreDumpAction" + }, + { + "required": [ + "hash" + ], + "title": "HashAction" + } + ], + "properties": { + "filter": { + "type": "string" + }, + "set": { + "$ref": "#/$defs/SetDefinition" + }, + "kill": { + "$ref": "#/$defs/KillDefinition" + }, + "coredump": { + "$ref": "#/$defs/CoreDumpDefinition" + }, + "hash": { + "$ref": "#/$defs/HashDefinition" + } + }, + "additionalProperties": false, + "type": "object", + "description": "ActionDefinition describes a rule action section" + }, + "CoreDumpDefinition": { + "anyOf": [ + { + "required": [ + "process" + ], + "title": "CoreDumpWithProcess" + }, + { + "required": [ + "mount" + ], + "title": "CoreDumpWithMount" + }, + { + "required": [ + "dentry" + ], + "title": "CoreDumpWithDentry" + } + ], + "properties": { + "process": { + "type": "boolean" + }, + "mount": { + "type": "boolean" + }, + "dentry": { + "type": "boolean" + }, + "no_compression": { + "type": "boolean" + } + }, + "additionalProperties": false, + "type": "object", + "description": "CoreDumpDefinition describes the 'coredump' action" + }, + 
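The new hash.schema.json above requires four string fields (type, path, state, trigger) for the hash report that agent_context.json now accepts alongside the kill report. Below is a minimal sketch of a payload shape that would satisfy that schema; the struct name and all values are hypothetical, only the field names come from the schema.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// hashReport mirrors the four required string properties of the new
// pkg/security/tests/schemas/hash.schema.json. Only the JSON field names
// are taken from the schema; everything else here is illustrative.
type hashReport struct {
	Type    string `json:"type"`
	Path    string `json:"path"`
	State   string `json:"state"`
	Trigger string `json:"trigger"`
}

func main() {
	report := hashReport{
		Type:    "sha256",        // hypothetical hash algorithm
		Path:    "/usr/bin/true", // hypothetical hashed file
		State:   "done",          // hypothetical state value
		Trigger: "process_exit",  // hypothetical trigger value
	}
	out, err := json.MarshalIndent(report, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```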
"HashDefinition": { + "properties": {}, + "additionalProperties": false, + "type": "object", + "description": "HashDefinition describes the 'hash' section of a rule action" + }, + "HookPointArg": { + "properties": { + "n": { + "type": "integer", + "description": "Zero-based argument index" + }, + "kind": { + "type": "string", + "enum": [ + "uint", + "null-terminated-string" + ] + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "n", + "kind" + ], + "description": "HookPointArg represents the definition of a hook point argument" + }, + "KillDefinition": { + "properties": { + "signal": { + "type": "string", + "description": "A valid signal name", + "examples": [ + "SIGKILL", + "SIGTERM" + ] + }, + "scope": { + "type": "string", + "enum": [ + "process", + "container" + ] + }, + "disarmer": { + "$ref": "#/$defs/KillDisarmerDefinition" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "signal" + ], + "description": "KillDefinition describes the 'kill' section of a rule action" + }, + "KillDisarmerDefinition": { + "properties": { + "container": { + "$ref": "#/$defs/KillDisarmerParamsDefinition" + }, + "executable": { + "$ref": "#/$defs/KillDisarmerParamsDefinition" + } + }, + "additionalProperties": false, + "type": "object", + "description": "KillDisarmerDefinition describes the 'disarmer' section of a kill action" + }, + "KillDisarmerParamsDefinition": { + "properties": { + "max_allowed": { + "type": "integer", + "description": "The maximum number of allowed kill actions within the period", + "examples": [ + 5 + ] + }, + "period": { + "oneOf": [ + { + "type": "string", + "format": "duration", + "description": "Duration in Go format (e.g. 1h30m, see https://pkg.go.dev/time#ParseDuration)" + }, + { + "type": "integer", + "description": "Duration in nanoseconds" + } + ], + "description": "The period of time during which the maximum number of allowed kill actions is calculated" + } + }, + "additionalProperties": false, + "type": "object", + "description": "KillDisarmerParamsDefinition describes the parameters of a kill action disarmer" + }, + "MacroDefinition": { + "oneOf": [ + { + "required": [ + "expression" + ], + "title": "MacroWithExpression" + }, + { + "required": [ + "values" + ], + "title": "MacroWithValues" + } + ], + "properties": { + "id": { + "type": "string" + }, + "expression": { + "type": "string" + }, + "description": { + "type": "string" + }, + "agent_version": { + "type": "string" + }, + "filters": { + "items": { + "type": "string" + }, + "type": "array" + }, + "values": { + "items": { + "type": "string" + }, + "type": "array" + }, + "combine": { + "type": "string", + "enum": [ + "merge", + "override" + ] + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "id" + ], + "description": "MacroDefinition holds the definition of a macro" + }, + "OnDemandHookPoint": { + "properties": { + "name": { + "type": "string" + }, + "syscall": { + "type": "boolean" + }, + "args": { + "items": { + "$ref": "#/$defs/HookPointArg" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "name" + ], + "description": "OnDemandHookPoint represents a hook point definition" + }, + "OverrideOptions": { + "properties": { + "fields": { + "items": { + "type": "string", + "enum": [ + "all", + "expression", + "actions", + "every", + "tags" + ] + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "fields" + ], + "description": 
"OverrideOptions defines combine options" + }, + "RuleDefinition": { + "properties": { + "id": { + "type": "string" + }, + "version": { + "type": "string" + }, + "expression": { + "type": "string" + }, + "description": { + "type": "string" + }, + "tags": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "agent_version": { + "type": "string" + }, + "filters": { + "items": { + "type": "string" + }, + "type": "array" + }, + "disabled": { + "type": "boolean" + }, + "combine": { + "type": "string", + "enum": [ + "override" + ] + }, + "override_options": { + "$ref": "#/$defs/OverrideOptions" + }, + "actions": { + "items": { + "$ref": "#/$defs/ActionDefinition" + }, + "type": "array" + }, + "every": { + "oneOf": [ + { + "type": "string", + "format": "duration", + "description": "Duration in Go format (e.g. 1h30m, see https://pkg.go.dev/time#ParseDuration)" + }, + { + "type": "integer", + "description": "Duration in nanoseconds" + } + ] + }, + "limiter_token": { + "items": { + "type": "string" + }, + "type": "array" + }, + "silent": { + "type": "boolean" + }, + "group_id": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "id" + ], + "description": "RuleDefinition holds the definition of a rule" + }, + "SetDefinition": { + "oneOf": [ + { + "required": [ + "value" + ], + "title": "SetWithValue" + }, + { + "required": [ + "field" + ], + "title": "SetWithField" + } + ], + "properties": { + "name": { + "type": "string" + }, + "value": true, + "field": { + "type": "string" + }, + "append": { + "type": "boolean" + }, + "scope": { + "type": "string", + "enum": [ + "process", + "container" + ] + }, + "size": { + "type": "integer" + }, + "ttl": { + "oneOf": [ + { + "type": "string", + "format": "duration", + "description": "Duration in Go format (e.g. 
1h30m, see https://pkg.go.dev/time#ParseDuration)" + }, + { + "type": "integer", + "description": "Duration in nanoseconds" + } + ] + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "name" + ], + "description": "SetDefinition describes the 'set' section of a rule action" + } + }, + "properties": { + "version": { + "type": "string" + }, + "macros": { + "items": { + "$ref": "#/$defs/MacroDefinition" + }, + "type": "array" + }, + "rules": { + "items": { + "$ref": "#/$defs/RuleDefinition" + }, + "type": "array" + }, + "hooks": { + "items": { + "$ref": "#/$defs/OnDemandHookPoint" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "version", + "rules" + ], + "description": "PolicyDef represents a policy file definition" +} \ No newline at end of file diff --git a/pkg/security/tests/syscall_tester/c/syscall_tester.c b/pkg/security/tests/syscall_tester/c/syscall_tester.c index 948ff885ab198..1a1cc06e800dc 100644 --- a/pkg/security/tests/syscall_tester/c/syscall_tester.c +++ b/pkg/security/tests/syscall_tester/c/syscall_tester.c @@ -642,8 +642,8 @@ int test_sleep(int argc, char **argv) { if (duration <= 0) { fprintf(stderr, "Please specify at a valid sleep duration\n"); } - for (int i = 0; i < duration; i++) - sleep(1); + sleep(duration); + return EXIT_SUCCESS; } @@ -659,8 +659,28 @@ int test_slow_cat(int argc, char **argv) { if (duration <= 0) { fprintf(stderr, "Please specify at a valid sleep duration\n"); } - for (int i = 0; i < duration; i++) - sleep(1); + sleep(duration); + + close(fd); + + return EXIT_SUCCESS; +} + +int test_slow_write(int argc, char **argv) { + if (argc != 4) { + fprintf(stderr, "%s: Please pass a duration in seconds, a path, and a content.\n", __FUNCTION__); + return EXIT_FAILURE; + } + + int duration = atoi(argv[1]); + int fd = open(argv[2], O_CREAT|O_WRONLY); + + if (duration <= 0) { + fprintf(stderr, "Please specify at a valid sleep duration\n"); + } + sleep(duration); + + write(fd, argv[3], strlen(argv[3])); close(fd); @@ -793,7 +813,10 @@ int main(int argc, char **argv) { exit_code = test_new_netns_exec(sub_argc, sub_argv); } else if (strcmp(cmd, "slow-cat") == 0) { exit_code = test_slow_cat(sub_argc, sub_argv); - } else { + } else if (strcmp(cmd, "slow-write") == 0) { + exit_code = test_slow_write(sub_argc, sub_argv); + } + else { fprintf(stderr, "Unknown command `%s`\n", cmd); exit_code = EXIT_FAILURE; } diff --git a/pkg/security/tests/syscall_tester/go/syscall_go_tester.go b/pkg/security/tests/syscall_tester/go/syscall_go_tester.go index 454ade057c8d0..c8c72ff67831b 100644 --- a/pkg/security/tests/syscall_tester/go/syscall_go_tester.go +++ b/pkg/security/tests/syscall_tester/go/syscall_go_tester.go @@ -43,6 +43,8 @@ var ( userSessionOpenPath string syscallDriftTest bool loginUIDOpenTest bool + loginUIDOpenPath string + loginUIDOpenUID int loginUIDExecTest bool loginUIDExecPath string ) @@ -231,25 +233,22 @@ func setSelfLoginUID(uid int) error { } func RunLoginUIDOpenTest() error { - if err := setSelfLoginUID(1005); err != nil { - return err + if loginUIDOpenUID != -1 { + if err := setSelfLoginUID(loginUIDOpenUID); err != nil { + return err + } } - testAUIDPath := "/tmp/test-auid" - // open test file to trigger an event - f, err := os.OpenFile(testAUIDPath, os.O_RDWR|os.O_CREATE, 0755) + f, err := os.OpenFile(loginUIDOpenPath, os.O_RDWR|os.O_CREATE, 0755) if err != nil { return fmt.Errorf("couldn't create test-auid file: %v", err) } + defer os.Remove(loginUIDOpenPath) if err = 
f.Close(); err != nil { return fmt.Errorf("couldn't close test file: %v", err) } - - if err = os.Remove(testAUIDPath); err != nil { - return fmt.Errorf("failed to remove test-auid file: %v", err) - } return nil } @@ -279,6 +278,8 @@ func main() { flag.BoolVar(&runIMDSTest, "run-imds-test", false, "when set, binds an IMDS server locally and sends a query to it") flag.BoolVar(&syscallDriftTest, "syscall-drift-test", false, "when set, runs the syscall drift test") flag.BoolVar(&loginUIDOpenTest, "login-uid-open-test", false, "when set, runs the login_uid open test") + flag.StringVar(&loginUIDOpenPath, "login-uid-open-path", "", "file used for the login_uid open test") + flag.IntVar(&loginUIDOpenUID, "login-uid-open-uid", 0, "uid used for the login_uid open test") flag.BoolVar(&loginUIDExecTest, "login-uid-exec-test", false, "when set, runs the login_uid exec test") flag.StringVar(&loginUIDExecPath, "login-uid-exec-path", "", "path to the executable to run during the login_uid exec test") diff --git a/pkg/security/tests/testopts.go b/pkg/security/tests/testopts.go index 43ef3dc1bbebf..d8eb578131e45 100644 --- a/pkg/security/tests/testopts.go +++ b/pkg/security/tests/testopts.go @@ -71,6 +71,7 @@ type testOpts struct { enforcementDisarmerExecutableMaxAllowed int enforcementDisarmerExecutablePeriod time.Duration eventServerRetention time.Duration + discardRuntime bool } type dynamicTestOpts struct { @@ -153,5 +154,6 @@ func (to testOpts) Equal(opts testOpts) bool { to.enforcementDisarmerExecutableEnabled == opts.enforcementDisarmerExecutableEnabled && to.enforcementDisarmerExecutableMaxAllowed == opts.enforcementDisarmerExecutableMaxAllowed && to.enforcementDisarmerExecutablePeriod == opts.enforcementDisarmerExecutablePeriod && - to.eventServerRetention == opts.eventServerRetention + to.eventServerRetention == opts.eventServerRetention && + to.discardRuntime == opts.discardRuntime } diff --git a/pkg/security/tests/usergroup_test.go b/pkg/security/tests/usergroup_test.go index c2a6dcc58409c..3f48684ca217c 100644 --- a/pkg/security/tests/usergroup_test.go +++ b/pkg/security/tests/usergroup_test.go @@ -127,7 +127,7 @@ func TestUserGroup(t *testing.T) { for _, distroTest := range distroTests { t.Run(distroTest.name, func(t *testing.T) { - dockerWrapper, err := newDockerCmdWrapper(test.Root(), test.Root(), distroTest.name) + dockerWrapper, err := newDockerCmdWrapper(test.Root(), test.Root(), distroTest.name, "") if err != nil { t.Fatal(err) } diff --git a/pkg/security/utils/hostname.go b/pkg/security/utils/hostname.go index 39988f41e1be3..1968802bd42ed 100644 --- a/pkg/security/utils/hostname.go +++ b/pkg/security/utils/hostname.go @@ -13,7 +13,7 @@ import ( "github.com/avast/retry-go/v4" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/grpc" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -60,12 +60,12 @@ func getHostnameFromAgent(ctx context.Context) (string, error) { ctx, cancel := context.WithTimeout(ctx, 1*time.Second) defer cancel() - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } - client, err := grpc.GetDDAgentClient(ctx, ipcAddress, config.GetIPCPort()) + client, err := grpc.GetDDAgentClient(ctx, ipcAddress, pkgconfigsetup.GetIPCPort()) if err != nil { return err } diff --git a/pkg/security/utils/limiter.go 
b/pkg/security/utils/limiter.go index d4fc6fcf6222b..ab9d0eef066d0 100644 --- a/pkg/security/utils/limiter.go +++ b/pkg/security/utils/limiter.go @@ -9,9 +9,8 @@ package utils import ( "time" - "go.uber.org/atomic" - "github.com/hashicorp/golang-lru/v2/simplelru" + "go.uber.org/atomic" ) type cacheEntry struct { @@ -38,8 +37,8 @@ type Limiter[K comparable] struct { } // NewLimiter returns a rate limiter that is sized to the configured number of unique tokens, and each unique token is allowed 'numAllowedTokensPerPeriod' times per 'period'. -func NewLimiter[K comparable](numUniqueTokens int, numAllowedTokensPerPeriod int, period time.Duration) (*Limiter[K], error) { - cache, err := simplelru.NewLRU[K, *cacheEntry](numUniqueTokens, nil) +func NewLimiter[K comparable](maxUniqueToken int, numAllowedTokensPerPeriod int, period time.Duration) (*Limiter[K], error) { + cache, err := simplelru.NewLRU[K, *cacheEntry](maxUniqueToken, nil) if err != nil { return nil, err } diff --git a/pkg/security/utils/path_linux.go b/pkg/security/utils/path_linux.go index 4fa2cf4ea0dbb..d2eb4b5971a12 100644 --- a/pkg/security/utils/path_linux.go +++ b/pkg/security/utils/path_linux.go @@ -5,6 +5,15 @@ package utils +import ( + "fmt" + "regexp" + "slices" + "strings" + + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" +) + // PathPatternMatchOpts PathPatternMatch options type PathPatternMatchOpts struct { WildcardLimit int // max number of wildcard in the pattern @@ -108,3 +117,163 @@ func PathPatternMatch(pattern string, path string, opts PathPatternMatchOpts) bo return false } + +// PathPatternBuilder pattern builder for files +func PathPatternBuilder(pattern string, path string, opts PathPatternMatchOpts) (string, bool) { + lenMax := len(pattern) + if l := len(path); l > lenMax { + lenMax = l + } + + var ( + i, j = 0, 0 + wildcardCount, nodeCount, suffixNode = 0, 0, 0 + offsetPattern, offsetPath, size = 0, 0, 0 + patternLen, pathLen = len(pattern), len(path) + wildcard bool + result = make([]byte, lenMax) + + computeNode = func() bool { + if wildcard { + wildcardCount++ + if wildcardCount > opts.WildcardLimit { + return false + } + if nodeCount < opts.PrefixNodeRequired { + return false + } + if opts.NodeSizeLimit != 0 && j-offsetPath < opts.NodeSizeLimit { + return false + } + + result[size], result[size+1] = '/', '*' + size += 2 + + offsetPattern, suffixNode = i, 0 + } else { + copy(result[size:], pattern[offsetPattern:i]) + size += i - offsetPattern + offsetPattern = i + suffixNode++ + } + + offsetPath = j + + if i > 0 { + nodeCount++ + } + return true + } + ) + + if patternLen > 0 && pattern[0] != '/' { + return "", false + } + + if pathLen > 0 && path[0] != '/' { + return "", false + } + + for i < len(pattern) && j < len(path) { + pn, ph := pattern[i], path[j] + if pn == '/' && ph == '/' { + if !computeNode() { + return "", false + } + wildcard = false + + i++ + j++ + continue + } + + if pn != ph { + wildcard = true + } + if pn != '/' { + i++ + } + if ph != '/' { + j++ + } + } + + if patternLen != i || pathLen != j { + wildcard = true + } + + for i < patternLen { + if pattern[i] == '/' { + return "", false + } + i++ + } + + for j < pathLen { + if path[j] == '/' { + return "", false + } + j++ + } + + if !computeNode() { + return "", false + } + + if opts.SuffixNodeRequired == 0 || suffixNode >= opts.SuffixNodeRequired { + return string(result[:size]), true + } + + return "", false +} + +// BuildPatterns find and build patterns for the path in the ruleset +func BuildPatterns(ruleset 
[]*rules.RuleDefinition) []*rules.RuleDefinition { + for _, rule := range ruleset { + findAndReplacePatterns(&rule.Expression) + } + return ruleset +} + +func findAndReplacePatterns(expression *string) { + + re := regexp.MustCompile(`\[(.*?)\]`) + matches := re.FindAllStringSubmatch(*expression, -1) + for _, match := range matches { + if len(match) > 1 { + arrayContent := match[1] + paths := replacePatterns(strings.Split(arrayContent, ",")) + // reconstruct the modified array as a string + modifiedArrayString := "[" + strings.Join(paths, ", ") + "]" + // replace the original array with the modified array in the input string + *expression = strings.Replace(*expression, match[0], modifiedArrayString, 1) + } + } + +} + +func replacePatterns(paths []string) []string { + var result []string + for _, pattern := range paths { + strippedPattern := strings.Trim(pattern, `~" `) + initalLength := len(result) + for _, path := range paths { + strippedPath := strings.Trim(path, `~" `) + if pattern == path { + continue + } + finalPath, ok := PathPatternBuilder(strippedPattern, strippedPath, PathPatternMatchOpts{WildcardLimit: 1, PrefixNodeRequired: 4}) + if ok { + finalPath = fmt.Sprintf("~\"%s\"", finalPath) + result = append(result, finalPath) + } + } + if len(result) == initalLength { + result = append(result, strings.Trim(pattern, ` `)) + } + } + // remove duplicates + slices.Sort(result) + result = slices.Compact(result) + return result +} diff --git a/pkg/security/utils/path_linux_test.go b/pkg/security/utils/path_linux_test.go index b4049c1e34099..a62b29c6ddae6 100644 --- a/pkg/security/utils/path_linux_test.go +++ b/pkg/security/utils/path_linux_test.go @@ -244,7 +244,278 @@ func TestPathPatternMatch(t *testing.T) { } } -func BenchmarkPathPatternBuilder(b *testing.B) { +func TestPathPatternBuilder(t *testing.T) { + tests := []struct { + Pattern string + Path string + Opts PathPatternMatchOpts + ExpectedResult bool + ExpectedPattern string + }{ + { + Pattern: "/etc/passwd", + Path: "/etc/passwd", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/etc/passwd", + }, + { + Pattern: "/bin/baz", + Path: "/bin/baz2", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/bin/*", + }, + { + Pattern: "/abc/12312/sad", + Path: "/abc/51231", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/abc/12312/sad/", + Path: "/abc/51231", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/abc/12312/sad/", + Path: "/abc/51231/", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/abc/12312/sad", + Path: "/abc/51231/", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/abc/12312", + Path: "/abc/51231/sad", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/abc/12312", + Path: "/abc/51231/sad/", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/abc/12312/", + Path: "/abc/51231/sad/", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/abc/12312/", + Path: "/abc/51231/sad", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + 
Pattern: "/12312", + Path: "/51231", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/*", + }, + { + Pattern: "12312", + Path: "51231", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "", + Path: "", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "", + }, + { + Pattern: "/bin/baz2", + Path: "/bin/baz", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/bin/*", + }, + { + Pattern: "/etc/http", + Path: "/etc/passwd", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/etc/*", + }, + { + Pattern: "/var/run/1234/runc.pid", + Path: "/var/run/54321/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/var/run/*/runc.pid", + }, + { + Pattern: "/var/run/12345/runc.pid", + Path: "/var/run/5432/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/var/run/*/runc.pid", + }, + { + Pattern: "/var/run/12345/12345/runc.pid", + Path: "/var/run/54321/54321/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/var/run/12345/12345/runc.pid", + Path: "/var/run/54321/54321/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 2}, + ExpectedResult: true, + ExpectedPattern: "/var/run/*/*/runc.pid", + }, + { + Pattern: "/12345/12345/runc.pid", + Path: "/54321/12345/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/*/12345/runc.pid", + }, + { + Pattern: "/var/runc/12345", + Path: "/var/runc/54321", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/var/runc/*", + }, + { + Pattern: "/var/runc12345", + Path: "/var/runc54321", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/var/*", + }, + { + Pattern: "/var/run/12345/runc.pid", + Path: "/var/run/12/45/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/var/run/66/45/runc.pid", + Path: "/var/run/12345/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/var/run/1234/runc.pid", + Path: "/var/run/12345/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/var/run/*/runc.pid", + }, + { + Pattern: "/var/run/1234/runc.pid", + Path: "/var/run/4321/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1, PrefixNodeRequired: 2}, + ExpectedResult: true, + ExpectedPattern: "/var/run/*/runc.pid", + }, + { + Pattern: "/var/run/sdfgh/runc.pid", + Path: "/var/run/hgfds/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1, PrefixNodeRequired: 3}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/var/run/1234/runc.pid", + Path: "/var/run/4321/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1, PrefixNodeRequired: 2, SuffixNodeRequired: 1}, + ExpectedResult: true, + ExpectedPattern: "/var/run/*/runc.pid", + }, + { + Pattern: "/var/run/1234/runc.pid", + Path: "/var/run/4321/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1, PrefixNodeRequired: 2, SuffixNodeRequired: 2}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/var", + Path: "/var", + Opts: PathPatternMatchOpts{WildcardLimit: 
1, PrefixNodeRequired: 2}, + ExpectedResult: true, + ExpectedPattern: "/var", + }, + { + Pattern: "/var", + Path: "/var", + Opts: PathPatternMatchOpts{WildcardLimit: 1, SuffixNodeRequired: 2}, + ExpectedResult: true, + ExpectedPattern: "/var", + }, + { + Pattern: "/var/run/1234/http.pid", + Path: "/var/run/4321/http.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1, NodeSizeLimit: 10}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/var/run/1234/mysql.pid", + Path: "/var/run/4321/mysql.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1, NodeSizeLimit: 4}, + ExpectedResult: true, + ExpectedPattern: "/var/run/*/mysql.pid", + }, + { + Pattern: "/bin/baz2", + Path: "/bin/baz", + Opts: PathPatternMatchOpts{WildcardLimit: 1, NodeSizeLimit: 6}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/bin/baz2", + Path: "/bin/baz", + Opts: PathPatternMatchOpts{WildcardLimit: 1, PrefixNodeRequired: 2}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/bin/baz2", + Path: "/bin/baz", + Opts: PathPatternMatchOpts{WildcardLimit: 1, SuffixNodeRequired: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + } + + for _, test := range tests { + t.Run("test", func(t *testing.T) { + p, r := PathPatternBuilder(test.Pattern, test.Path, test.Opts) + assert.Equal(t, test.ExpectedPattern, p, "%s vs %s", test.Pattern, test.Path) + assert.Equal(t, test.ExpectedResult, r, "%s vs %s", test.Pattern, test.Path) + }) + } +} + +func BenchmarkPathPatternMatch(b *testing.B) { b.Run("pattern", func(b *testing.B) { for i := 0; i < b.N; i++ { PathPatternMatch("/var/run/1234/runc.pid", "/var/run/54321/runc.pid", PathPatternMatchOpts{WildcardLimit: 1, PrefixNodeRequired: 2, SuffixNodeRequired: 2}) diff --git a/pkg/security/utils/proc_linux.go b/pkg/security/utils/proc_linux.go index cfb0894125b81..6746091231e65 100644 --- a/pkg/security/utils/proc_linux.go +++ b/pkg/security/utils/proc_linux.go @@ -12,6 +12,7 @@ import ( "io" "os" "path" + "path/filepath" "regexp" "strconv" "strings" @@ -122,17 +123,20 @@ func ProcRootFilePath(pid uint32, file string) string { return procPidPath2(pid, "root", file) } +// we do not use `HostProc` here because of the double call to `filepath.Join` +// and those functions can be called in a tight loop + func procPidPath(pid uint32, path string) string { - return kernel.HostProc(strconv.FormatUint(uint64(pid), 10), path) + return filepath.Join(kernel.ProcFSRoot(), strconv.FormatUint(uint64(pid), 10), path) } func procPidPath2(pid uint32, path1 string, path2 string) string { - return kernel.HostProc(strconv.FormatUint(uint64(pid), 10), path1, path2) + return filepath.Join(kernel.ProcFSRoot(), strconv.FormatUint(uint64(pid), 10), path1, path2) } // ModulesPath returns the path to the modules file in /proc func ModulesPath() string { - return kernel.HostProc("modules") + return filepath.Join(kernel.ProcFSRoot(), "modules") } // GetLoginUID returns the login uid of the provided process diff --git a/pkg/security/utils/stat_unix.go b/pkg/security/utils/stat_unix.go new file mode 100644 index 0000000000000..4c12a3b6c8178 --- /dev/null +++ b/pkg/security/utils/stat_unix.go @@ -0,0 +1,59 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
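PathPatternBuilder, added above in path_linux.go, generalizes two concrete paths into a single wildcard pattern within the limits set by PathPatternMatchOpts, as the test table shows. The sketch below exercises two cases taken from that table; it assumes the code is compiled inside the datadog-agent module on Linux so the pkg/security/utils import resolves.

```go
//go:build linux

package main

import (
	"fmt"

	// assumes this file is built within the datadog-agent module
	"github.com/DataDog/datadog-agent/pkg/security/utils"
)

func main() {
	opts := utils.PathPatternMatchOpts{WildcardLimit: 1}

	// Two paths that differ in a single directory node are merged into one
	// wildcard pattern (same inputs as in the test table above).
	if pattern, ok := utils.PathPatternBuilder("/var/run/1234/runc.pid", "/var/run/54321/runc.pid", opts); ok {
		fmt.Println(pattern) // /var/run/*/runc.pid
	}

	// Paths with a different number of nodes cannot be merged.
	if _, ok := utils.PathPatternBuilder("/abc/12312/sad", "/abc/51231", opts); !ok {
		fmt.Println("no common pattern")
	}
}
```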
+ +//go:build unix + +// Package utils holds utils related files +package utils + +import ( + "io/fs" + "syscall" +) + +// UnixStat is an unix only equivalent to os.Stat, but alloc-free, +// and returning directly the platform-specific syscall.Stat_t structure. +func UnixStat(path string) (syscall.Stat_t, error) { + var stat syscall.Stat_t + var err error + for { + err := syscall.Stat(path, &stat) + if err != syscall.EINTR { + break + } + } + return stat, err +} + +// UnixStatModeToGoFileMode converts a Unix mode to a Go fs.FileMode. +func UnixStatModeToGoFileMode(mode uint32) fs.FileMode { + fsmode := fs.FileMode(mode & 0777) + switch mode & syscall.S_IFMT { + case syscall.S_IFBLK: + fsmode |= fs.ModeDevice + case syscall.S_IFCHR: + fsmode |= fs.ModeDevice | fs.ModeCharDevice + case syscall.S_IFDIR: + fsmode |= fs.ModeDir + case syscall.S_IFIFO: + fsmode |= fs.ModeNamedPipe + case syscall.S_IFLNK: + fsmode |= fs.ModeSymlink + case syscall.S_IFREG: + // nothing to do + case syscall.S_IFSOCK: + fsmode |= fs.ModeSocket + } + if mode&syscall.S_ISGID != 0 { + fsmode |= fs.ModeSetgid + } + if mode&syscall.S_ISUID != 0 { + fsmode |= fs.ModeSetuid + } + if mode&syscall.S_ISVTX != 0 { + fsmode |= fs.ModeSticky + } + return fsmode +} diff --git a/pkg/security/utils/syscalls.go b/pkg/security/utils/syscalls.go new file mode 100644 index 0000000000000..6e38c0a34f62f --- /dev/null +++ b/pkg/security/utils/syscalls.go @@ -0,0 +1,733 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package utils groups multiple utils function that can be used by the secl package +package utils + +// SyscallKey key representing the arch and syscall id +type SyscallKey struct { + Arch string + ID int +} + +// Syscalls maps the (arch,syscall_id) to the syscall string +var Syscalls = map[SyscallKey]string{ + // amd64 syscalls + {"amd64", 0}: "read", + {"amd64", 1}: "write", + {"amd64", 2}: "open", + {"amd64", 3}: "close", + {"amd64", 4}: "stat", + {"amd64", 5}: "fstat", + {"amd64", 6}: "lstat", + {"amd64", 7}: "poll", + {"amd64", 8}: "lseek", + {"amd64", 9}: "mmap", + {"amd64", 10}: "mprotect", + {"amd64", 11}: "munmap", + {"amd64", 12}: "brk", + {"amd64", 13}: "rt_sigaction", + {"amd64", 14}: "rt_sigprocmask", + {"amd64", 15}: "rt_sigreturn", + {"amd64", 16}: "ioctl", + {"amd64", 17}: "pread64", + {"amd64", 18}: "pwrite64", + {"amd64", 19}: "readv", + {"amd64", 20}: "writev", + {"amd64", 21}: "access", + {"amd64", 22}: "pipe", + {"amd64", 23}: "select", + {"amd64", 24}: "sched_yield", + {"amd64", 25}: "mremap", + {"amd64", 26}: "msync", + {"amd64", 27}: "mincore", + {"amd64", 28}: "madvise", + {"amd64", 29}: "shmget", + {"amd64", 30}: "shmat", + {"amd64", 31}: "shmctl", + {"amd64", 32}: "dup", + {"amd64", 33}: "dup2", + {"amd64", 34}: "pause", + {"amd64", 35}: "nanosleep", + {"amd64", 36}: "getitimer", + {"amd64", 37}: "alarm", + {"amd64", 38}: "setitimer", + {"amd64", 39}: "getpid", + {"amd64", 40}: "sendfile", + {"amd64", 41}: "socket", + {"amd64", 42}: "connect", + {"amd64", 43}: "accept", + {"amd64", 44}: "sendto", + {"amd64", 45}: "recvfrom", + {"amd64", 46}: "sendmsg", + {"amd64", 47}: "recvmsg", + {"amd64", 48}: "shutdown", + {"amd64", 49}: "bind", + {"amd64", 50}: "listen", + {"amd64", 51}: "getsockname", + {"amd64", 52}: "getpeername", + {"amd64", 53}: "socketpair", + {"amd64", 54}: "setsockopt", + {"amd64", 
55}: "getsockopt", + {"amd64", 56}: "clone", + {"amd64", 57}: "fork", + {"amd64", 58}: "vfork", + {"amd64", 59}: "execve", + {"amd64", 60}: "exit", + {"amd64", 61}: "wait4", + {"amd64", 62}: "kill", + {"amd64", 63}: "uname", + {"amd64", 64}: "semget", + {"amd64", 65}: "semop", + {"amd64", 66}: "semctl", + {"amd64", 67}: "shmdt", + {"amd64", 68}: "msgget", + {"amd64", 69}: "msgsnd", + {"amd64", 70}: "msgrcv", + {"amd64", 71}: "msgctl", + {"amd64", 72}: "fcntl", + {"amd64", 73}: "flock", + {"amd64", 74}: "fsync", + {"amd64", 75}: "fdatasync", + {"amd64", 76}: "truncate", + {"amd64", 77}: "ftruncate", + {"amd64", 78}: "getdents", + {"amd64", 79}: "getcwd", + {"amd64", 80}: "chdir", + {"amd64", 81}: "fchdir", + {"amd64", 82}: "rename", + {"amd64", 83}: "mkdir", + {"amd64", 84}: "rmdir", + {"amd64", 85}: "creat", + {"amd64", 86}: "link", + {"amd64", 87}: "unlink", + {"amd64", 88}: "symlink", + {"amd64", 89}: "readlink", + {"amd64", 90}: "chmod", + {"amd64", 91}: "fchmod", + {"amd64", 92}: "chown", + {"amd64", 93}: "fchown", + {"amd64", 94}: "lchown", + {"amd64", 95}: "umask", + {"amd64", 96}: "gettimeofday", + {"amd64", 97}: "getrlimit", + {"amd64", 98}: "getrusage", + {"amd64", 99}: "sysinfo", + {"amd64", 100}: "times", + {"amd64", 101}: "ptrace", + {"amd64", 102}: "getuid", + {"amd64", 103}: "syslog", + {"amd64", 104}: "getgid", + {"amd64", 105}: "setuid", + {"amd64", 106}: "setgid", + {"amd64", 107}: "geteuid", + {"amd64", 108}: "getegid", + {"amd64", 109}: "setpgid", + {"amd64", 110}: "getppid", + {"amd64", 111}: "getpgrp", + {"amd64", 112}: "setsid", + {"amd64", 113}: "setreuid", + {"amd64", 114}: "setregid", + {"amd64", 115}: "getgroups", + {"amd64", 116}: "setgroups", + {"amd64", 117}: "setresuid", + {"amd64", 118}: "getresuid", + {"amd64", 119}: "setresgid", + {"amd64", 120}: "getresgid", + {"amd64", 121}: "getpgid", + {"amd64", 122}: "setfsuid", + {"amd64", 123}: "setfsgid", + {"amd64", 124}: "getsid", + {"amd64", 125}: "capget", + {"amd64", 126}: "capset", + {"amd64", 127}: "rt_sigpending", + {"amd64", 128}: "rt_sigtimedwait", + {"amd64", 129}: "rt_sigqueueinfo", + {"amd64", 130}: "rt_sigsuspend", + {"amd64", 131}: "sigaltstack", + {"amd64", 132}: "utime", + {"amd64", 133}: "mknod", + {"amd64", 134}: "uselib", + {"amd64", 135}: "personality", + {"amd64", 136}: "ustat", + {"amd64", 137}: "statfs", + {"amd64", 138}: "fstatfs", + {"amd64", 139}: "sysfs", + {"amd64", 140}: "getpriority", + {"amd64", 141}: "setpriority", + {"amd64", 142}: "sched_setparam", + {"amd64", 143}: "sched_getparam", + {"amd64", 144}: "sched_setscheduler", + {"amd64", 145}: "sched_getscheduler", + {"amd64", 146}: "sched_get_priority_max", + {"amd64", 147}: "sched_get_priority_min", + {"amd64", 148}: "sched_rr_get_interval", + {"amd64", 149}: "mlock", + {"amd64", 150}: "munlock", + {"amd64", 151}: "mlockall", + {"amd64", 152}: "munlockall", + {"amd64", 153}: "vhangup", + {"amd64", 154}: "modify_ldt", + {"amd64", 155}: "pivot_root", + {"amd64", 156}: "sysctl", + {"amd64", 157}: "prctl", + {"amd64", 158}: "arch_prctl", + {"amd64", 159}: "adjtimex", + {"amd64", 160}: "setrlimit", + {"amd64", 161}: "chroot", + {"amd64", 162}: "sync", + {"amd64", 163}: "acct", + {"amd64", 164}: "settimeofday", + {"amd64", 165}: "mount", + {"amd64", 166}: "umount2", + {"amd64", 167}: "swapon", + {"amd64", 168}: "swapoff", + {"amd64", 169}: "reboot", + {"amd64", 170}: "sethostname", + {"amd64", 171}: "setdomainname", + {"amd64", 172}: "iopl", + {"amd64", 173}: "ioperm", + {"amd64", 174}: "create_module", + {"amd64", 175}: "init_module", + 
{"amd64", 176}: "delete_module", + {"amd64", 177}: "get_kernel_syms", + {"amd64", 178}: "query_module", + {"amd64", 179}: "quotactl", + {"amd64", 180}: "nfsservctl", + {"amd64", 181}: "getpmsg", + {"amd64", 182}: "putpmsg", + {"amd64", 183}: "afs_syscall", + {"amd64", 184}: "tuxcall", + {"amd64", 185}: "security", + {"amd64", 186}: "gettid", + {"amd64", 187}: "readahead", + {"amd64", 188}: "setxattr", + {"amd64", 189}: "lsetxattr", + {"amd64", 190}: "fsetxattr", + {"amd64", 191}: "getxattr", + {"amd64", 192}: "lgetxattr", + {"amd64", 193}: "fgetxattr", + {"amd64", 194}: "listxattr", + {"amd64", 195}: "llistxattr", + {"amd64", 196}: "flistxattr", + {"amd64", 197}: "removexattr", + {"amd64", 198}: "lremovexattr", + {"amd64", 199}: "fremovexattr", + {"amd64", 200}: "tkill", + {"amd64", 201}: "time", + {"amd64", 202}: "futex", + {"amd64", 203}: "sched_setaffinity", + {"amd64", 204}: "sched_getaffinity", + {"amd64", 205}: "set_thread_area", + {"amd64", 206}: "io_setup", + {"amd64", 207}: "io_destroy", + {"amd64", 208}: "io_getevents", + {"amd64", 209}: "io_submit", + {"amd64", 210}: "io_cancel", + {"amd64", 211}: "get_thread_area", + {"amd64", 212}: "lookup_dcookie", + {"amd64", 213}: "epoll_create", + {"amd64", 214}: "epoll_ctl_old", + {"amd64", 215}: "epoll_wait_old", + {"amd64", 216}: "remap_file_pages", + {"amd64", 217}: "getdents64", + {"amd64", 218}: "set_tid_address", + {"amd64", 219}: "restart_syscall", + {"amd64", 220}: "semtimedop", + {"amd64", 221}: "fadvise64", + {"amd64", 222}: "timer_create", + {"amd64", 223}: "timer_settime", + {"amd64", 224}: "timer_gettime", + {"amd64", 225}: "timer_getoverrun", + {"amd64", 226}: "timer_delete", + {"amd64", 227}: "clock_settime", + {"amd64", 228}: "clock_gettime", + {"amd64", 229}: "clock_getres", + {"amd64", 230}: "clock_nanosleep", + {"amd64", 231}: "exit_group", + {"amd64", 232}: "epoll_wait", + {"amd64", 233}: "epoll_ctl", + {"amd64", 234}: "tgkill", + {"amd64", 235}: "utimes", + {"amd64", 236}: "vserver", + {"amd64", 237}: "mbind", + {"amd64", 238}: "set_mempolicy", + {"amd64", 239}: "get_mempolicy", + {"amd64", 240}: "mq_open", + {"amd64", 241}: "mq_unlink", + {"amd64", 242}: "mq_timedsend", + {"amd64", 243}: "mq_timedreceive", + {"amd64", 244}: "mq_notify", + {"amd64", 245}: "mq_getsetattr", + {"amd64", 246}: "kexec_load", + {"amd64", 247}: "waitid", + {"amd64", 248}: "add_key", + {"amd64", 249}: "request_key", + {"amd64", 250}: "keyctl", + {"amd64", 251}: "ioprio_set", + {"amd64", 252}: "ioprio_get", + {"amd64", 253}: "inotify_init", + {"amd64", 254}: "inotify_add_watch", + {"amd64", 255}: "inotify_rm_watch", + {"amd64", 256}: "migrate_pages", + {"amd64", 257}: "openat", + {"amd64", 258}: "mkdirat", + {"amd64", 259}: "mknodat", + {"amd64", 260}: "fchownat", + {"amd64", 261}: "futimesat", + {"amd64", 262}: "newfstatat", + {"amd64", 263}: "unlinkat", + {"amd64", 264}: "renameat", + {"amd64", 265}: "linkat", + {"amd64", 266}: "symlinkat", + {"amd64", 267}: "readlinkat", + {"amd64", 268}: "fchmodat", + {"amd64", 269}: "faccessat", + {"amd64", 270}: "pselect6", + {"amd64", 271}: "ppoll", + {"amd64", 272}: "unshare", + {"amd64", 273}: "set_robust_list", + {"amd64", 274}: "get_robust_list", + {"amd64", 275}: "splice", + {"amd64", 276}: "tee", + {"amd64", 277}: "sync_file_range", + {"amd64", 278}: "vmsplice", + {"amd64", 279}: "move_pages", + {"amd64", 280}: "utimensat", + {"amd64", 281}: "epoll_pwait", + {"amd64", 282}: "signalfd", + {"amd64", 283}: "timerfd_create", + {"amd64", 284}: "eventfd", + {"amd64", 285}: "fallocate", + {"amd64", 286}: 
"timerfd_settime", + {"amd64", 287}: "timerfd_gettime", + {"amd64", 288}: "accept4", + {"amd64", 289}: "signalfd4", + {"amd64", 290}: "eventfd2", + {"amd64", 291}: "epoll_create1", + {"amd64", 292}: "dup3", + {"amd64", 293}: "pipe2", + {"amd64", 294}: "inotify_init1", + {"amd64", 295}: "preadv", + {"amd64", 296}: "pwritev", + {"amd64", 297}: "rt_tgsigqueueinfo", + {"amd64", 298}: "perf_event_open", + {"amd64", 299}: "recvmmsg", + {"amd64", 300}: "fanotify_init", + {"amd64", 301}: "fanotify_mark", + {"amd64", 302}: "prlimit64", + {"amd64", 303}: "name_to_handle_at", + {"amd64", 304}: "open_by_handle_at", + {"amd64", 305}: "clock_adjtime", + {"amd64", 306}: "syncfs", + {"amd64", 307}: "sendmmsg", + {"amd64", 308}: "setns", + {"amd64", 309}: "getcpu", + {"amd64", 310}: "process_vm_readv", + {"amd64", 311}: "process_vm_writev", + {"amd64", 312}: "kcmp", + {"amd64", 313}: "finit_module", + {"amd64", 314}: "sched_setattr", + {"amd64", 315}: "sched_getattr", + {"amd64", 316}: "renameat2", + {"amd64", 317}: "seccomp", + {"amd64", 318}: "getrandom", + {"amd64", 319}: "memfd_create", + {"amd64", 320}: "kexec_file_load", + {"amd64", 321}: "bpf", + {"amd64", 322}: "execveat", + {"amd64", 323}: "userfaultfd", + {"amd64", 324}: "membarrier", + {"amd64", 325}: "mlock2", + {"amd64", 326}: "copy_file_range", + {"amd64", 327}: "preadv2", + {"amd64", 328}: "pwritev2", + {"amd64", 329}: "pkey_mprotect", + {"amd64", 330}: "pkey_alloc", + {"amd64", 331}: "pkey_free", + {"amd64", 332}: "statx", + {"amd64", 333}: "io_pgetevents", + {"amd64", 334}: "rseq", + {"amd64", 424}: "pidfd_send_signal", + {"amd64", 425}: "io_uring_setup", + {"amd64", 426}: "io_uring_enter", + {"amd64", 427}: "io_uring_register", + {"amd64", 428}: "open_tree", + {"amd64", 429}: "move_mount", + {"amd64", 430}: "fsopen", + {"amd64", 431}: "fsconfig", + {"amd64", 432}: "fsmount", + {"amd64", 433}: "fspick", + {"amd64", 434}: "pidfd_open", + {"amd64", 435}: "clone3", + {"amd64", 436}: "close_range", + {"amd64", 437}: "openat2", + {"amd64", 438}: "pidfd_getfd", + {"amd64", 439}: "faccessat2", + {"amd64", 440}: "process_madvise", + {"amd64", 441}: "epoll_pwait2", + {"amd64", 442}: "mount_setattr", + {"amd64", 443}: "quotactl_fd", + {"amd64", 444}: "landlock_create_ruleset", + {"amd64", 445}: "landlock_add_rule", + {"amd64", 446}: "landlock_restrict_self", + {"amd64", 447}: "memfd_secret", + {"amd64", 448}: "process_mrelease", + {"amd64", 449}: "futex_waitv", + {"amd64", 450}: "set_mempolicy_home_node", + {"amd64", 451}: "cachestat", + {"amd64", 452}: "fchmodat2", + {"amd64", 453}: "map_shadow_stack", + {"amd64", 454}: "futex_wake", + {"amd64", 455}: "futex_wait", + {"amd64", 456}: "futex_requeue", + {"amd64", 457}: "statmount", + {"amd64", 458}: "listmount", + {"amd64", 459}: "lsm_get_self_attr", + {"amd64", 460}: "lsm_set_self_attr", + {"amd64", 461}: "lsm_list_modules", + {"amd64", 462}: "mseal", + + // arm64 syscalls + {"arm64", 0}: "io_setup", + {"arm64", 1}: "io_destroy", + {"arm64", 2}: "io_submit", + {"arm64", 3}: "io_cancel", + {"arm64", 4}: "io_getevents", + {"arm64", 5}: "setxattr", + {"arm64", 6}: "lsetxattr", + {"arm64", 7}: "fsetxattr", + {"arm64", 8}: "getxattr", + {"arm64", 9}: "lgetxattr", + {"arm64", 10}: "fgetxattr", + {"arm64", 11}: "listxattr", + {"arm64", 12}: "llistxattr", + {"arm64", 13}: "flistxattr", + {"arm64", 14}: "removexattr", + {"arm64", 15}: "lremovexattr", + {"arm64", 16}: "fremovexattr", + {"arm64", 17}: "getcwd", + {"arm64", 18}: "lookup_dcookie", + {"arm64", 19}: "eventfd2", + {"arm64", 20}: "epoll_create1", + 
{"arm64", 21}: "epoll_ctl", + {"arm64", 22}: "epoll_pwait", + {"arm64", 23}: "dup", + {"arm64", 24}: "dup3", + {"arm64", 25}: "fcntl", + {"arm64", 26}: "inotify_init1", + {"arm64", 27}: "inotify_add_watch", + {"arm64", 28}: "inotify_rm_watch", + {"arm64", 29}: "ioctl", + {"arm64", 30}: "ioprio_set", + {"arm64", 31}: "ioprio_get", + {"arm64", 32}: "flock", + {"arm64", 33}: "mknodat", + {"arm64", 34}: "mkdirat", + {"arm64", 35}: "unlinkat", + {"arm64", 36}: "symlinkat", + {"arm64", 37}: "linkat", + {"arm64", 38}: "renameat", + {"arm64", 39}: "umount2", + {"arm64", 40}: "mount", + {"arm64", 41}: "pivot_root", + {"arm64", 42}: "nfsservctl", + {"arm64", 43}: "statfs", + {"arm64", 44}: "fstatfs", + {"arm64", 45}: "truncate", + {"arm64", 46}: "ftruncate", + {"arm64", 47}: "fallocate", + {"arm64", 48}: "faccessat", + {"arm64", 49}: "chdir", + {"arm64", 50}: "fchdir", + {"arm64", 51}: "chroot", + {"arm64", 52}: "fchmod", + {"arm64", 53}: "fchmodat", + {"arm64", 54}: "fchownat", + {"arm64", 55}: "fchown", + {"arm64", 56}: "openat", + {"arm64", 57}: "close", + {"arm64", 58}: "vhangup", + {"arm64", 59}: "pipe2", + {"arm64", 60}: "quotactl", + {"arm64", 61}: "getdents64", + {"arm64", 62}: "lseek", + {"arm64", 63}: "read", + {"arm64", 64}: "write", + {"arm64", 65}: "readv", + {"arm64", 66}: "writev", + {"arm64", 67}: "pread64", + {"arm64", 68}: "pwrite64", + {"arm64", 69}: "preadv", + {"arm64", 70}: "pwritev", + {"arm64", 71}: "sendfile", + {"arm64", 72}: "pselect6", + {"arm64", 73}: "ppoll", + {"arm64", 74}: "signalfd4", + {"arm64", 75}: "vmsplice", + {"arm64", 76}: "splice", + {"arm64", 77}: "tee", + {"arm64", 78}: "readlinkat", + {"arm64", 79}: "fstatat", + {"arm64", 80}: "fstat", + {"arm64", 81}: "sync", + {"arm64", 82}: "fsync", + {"arm64", 83}: "fdatasync", + {"arm64", 84}: "sync_file_range", + {"arm64", 85}: "timerfd_create", + {"arm64", 86}: "timerfd_settime", + {"arm64", 87}: "timerfd_gettime", + {"arm64", 88}: "utimensat", + {"arm64", 89}: "acct", + {"arm64", 90}: "capget", + {"arm64", 91}: "capset", + {"arm64", 92}: "personality", + {"arm64", 93}: "exit", + {"arm64", 94}: "exit_group", + {"arm64", 95}: "waitid", + {"arm64", 96}: "set_tid_address", + {"arm64", 97}: "unshare", + {"arm64", 98}: "futex", + {"arm64", 99}: "set_robust_list", + {"arm64", 100}: "get_robust_list", + {"arm64", 101}: "nanosleep", + {"arm64", 102}: "getitimer", + {"arm64", 103}: "setitimer", + {"arm64", 104}: "kexec_load", + {"arm64", 105}: "init_module", + {"arm64", 106}: "delete_module", + {"arm64", 107}: "timer_create", + {"arm64", 108}: "timer_gettime", + {"arm64", 109}: "timer_getoverrun", + {"arm64", 110}: "timer_settime", + {"arm64", 111}: "timer_delete", + {"arm64", 112}: "clock_settime", + {"arm64", 113}: "clock_gettime", + {"arm64", 114}: "clock_getres", + {"arm64", 115}: "clock_nanosleep", + {"arm64", 116}: "syslog", + {"arm64", 117}: "ptrace", + {"arm64", 118}: "sched_setparam", + {"arm64", 119}: "sched_setscheduler", + {"arm64", 120}: "sched_getscheduler", + {"arm64", 121}: "sched_getparam", + {"arm64", 122}: "sched_setaffinity", + {"arm64", 123}: "sched_getaffinity", + {"arm64", 124}: "sched_yield", + {"arm64", 125}: "sched_get_priority_max", + {"arm64", 126}: "sched_get_priority_min", + {"arm64", 127}: "sched_rr_get_interval", + {"arm64", 128}: "restart_syscall", + {"arm64", 129}: "kill", + {"arm64", 130}: "tkill", + {"arm64", 131}: "tgkill", + {"arm64", 132}: "sigaltstack", + {"arm64", 133}: "rt_sigsuspend", + {"arm64", 134}: "rt_sigaction", + {"arm64", 135}: "rt_sigprocmask", + {"arm64", 136}: 
"rt_sigpending", + {"arm64", 137}: "rt_sigtimedwait", + {"arm64", 138}: "rt_sigqueueinfo", + {"arm64", 139}: "rt_sigreturn", + {"arm64", 140}: "setpriority", + {"arm64", 141}: "getpriority", + {"arm64", 142}: "reboot", + {"arm64", 143}: "setregid", + {"arm64", 144}: "setgid", + {"arm64", 145}: "setreuid", + {"arm64", 146}: "setuid", + {"arm64", 147}: "setresuid", + {"arm64", 148}: "getresuid", + {"arm64", 149}: "setresgid", + {"arm64", 150}: "getresgid", + {"arm64", 151}: "setfsuid", + {"arm64", 152}: "setfsgid", + {"arm64", 153}: "times", + {"arm64", 154}: "setpgid", + {"arm64", 155}: "getpgid", + {"arm64", 156}: "getsid", + {"arm64", 157}: "setsid", + {"arm64", 158}: "getgroups", + {"arm64", 159}: "setgroups", + {"arm64", 160}: "uname", + {"arm64", 161}: "sethostname", + {"arm64", 162}: "setdomainname", + {"arm64", 163}: "getrlimit", + {"arm64", 164}: "setrlimit", + {"arm64", 165}: "getrusage", + {"arm64", 166}: "umask", + {"arm64", 167}: "prctl", + {"arm64", 168}: "getcpu", + {"arm64", 169}: "gettimeofday", + {"arm64", 170}: "settimeofday", + {"arm64", 171}: "adjtimex", + {"arm64", 172}: "getpid", + {"arm64", 173}: "getppid", + {"arm64", 174}: "getuid", + {"arm64", 175}: "geteuid", + {"arm64", 176}: "getgid", + {"arm64", 177}: "getegid", + {"arm64", 178}: "gettid", + {"arm64", 179}: "sysinfo", + {"arm64", 180}: "mq_open", + {"arm64", 181}: "mq_unlink", + {"arm64", 182}: "mq_timedsend", + {"arm64", 183}: "mq_timedreceive", + {"arm64", 184}: "mq_notify", + {"arm64", 185}: "mq_getsetattr", + {"arm64", 186}: "msgget", + {"arm64", 187}: "msgctl", + {"arm64", 188}: "msgrcv", + {"arm64", 189}: "msgsnd", + {"arm64", 190}: "semget", + {"arm64", 191}: "semctl", + {"arm64", 192}: "semtimedop", + {"arm64", 193}: "semop", + {"arm64", 194}: "shmget", + {"arm64", 195}: "shmctl", + {"arm64", 196}: "shmat", + {"arm64", 197}: "shmdt", + {"arm64", 198}: "socket", + {"arm64", 199}: "socketpair", + {"arm64", 200}: "bind", + {"arm64", 201}: "listen", + {"arm64", 202}: "accept", + {"arm64", 203}: "connect", + {"arm64", 204}: "getsockname", + {"arm64", 205}: "getpeername", + {"arm64", 206}: "sendto", + {"arm64", 207}: "recvfrom", + {"arm64", 208}: "setsockopt", + {"arm64", 209}: "getsockopt", + {"arm64", 210}: "shutdown", + {"arm64", 211}: "sendmsg", + {"arm64", 212}: "recvmsg", + {"arm64", 213}: "readahead", + {"arm64", 214}: "brk", + {"arm64", 215}: "munmap", + {"arm64", 216}: "mremap", + {"arm64", 217}: "add_key", + {"arm64", 218}: "request_key", + {"arm64", 219}: "keyctl", + {"arm64", 220}: "clone", + {"arm64", 221}: "execve", + {"arm64", 222}: "mmap", + {"arm64", 223}: "fadvise64", + {"arm64", 224}: "swapon", + {"arm64", 225}: "swapoff", + {"arm64", 226}: "mprotect", + {"arm64", 227}: "msync", + {"arm64", 228}: "mlock", + {"arm64", 229}: "munlock", + {"arm64", 230}: "mlockall", + {"arm64", 231}: "munlockall", + {"arm64", 232}: "mincore", + {"arm64", 233}: "madvise", + {"arm64", 234}: "remap_file_pages", + {"arm64", 235}: "mbind", + {"arm64", 236}: "get_mempolicy", + {"arm64", 237}: "set_mempolicy", + {"arm64", 238}: "migrate_pages", + {"arm64", 239}: "move_pages", + {"arm64", 240}: "rt_tgsigqueueinfo", + {"arm64", 241}: "perf_event_open", + {"arm64", 242}: "accept4", + {"arm64", 243}: "recvmmsg", + {"arm64", 244}: "arch_specific_syscall", + {"arm64", 260}: "wait4", + {"arm64", 261}: "prlimit64", + {"arm64", 262}: "fanotify_init", + {"arm64", 263}: "fanotify_mark", + {"arm64", 264}: "name_to_handle_at", + {"arm64", 265}: "open_by_handle_at", + {"arm64", 266}: "clock_adjtime", + {"arm64", 267}: "syncfs", + 
{"arm64", 268}: "setns", + {"arm64", 269}: "sendmmsg", + {"arm64", 270}: "process_vm_readv", + {"arm64", 271}: "process_vm_writev", + {"arm64", 272}: "kcmp", + {"arm64", 273}: "finit_module", + {"arm64", 274}: "sched_setattr", + {"arm64", 275}: "sched_getattr", + {"arm64", 276}: "renameat2", + {"arm64", 277}: "seccomp", + {"arm64", 278}: "getrandom", + {"arm64", 279}: "memfd_create", + {"arm64", 280}: "bpf", + {"arm64", 281}: "execveat", + {"arm64", 282}: "userfaultfd", + {"arm64", 283}: "membarrier", + {"arm64", 284}: "mlock2", + {"arm64", 285}: "copy_file_range", + {"arm64", 286}: "preadv2", + {"arm64", 287}: "pwritev2", + {"arm64", 288}: "pkey_mprotect", + {"arm64", 289}: "pkey_alloc", + {"arm64", 290}: "pkey_free", + {"arm64", 291}: "statx", + {"arm64", 292}: "io_pgetevents", + {"arm64", 293}: "rseq", + {"arm64", 294}: "kexec_file_load", + {"arm64", 403}: "clock_gettime64", + {"arm64", 404}: "clock_settime64", + {"arm64", 405}: "clock_adjtime64", + {"arm64", 406}: "clock_getres_time64", + {"arm64", 407}: "clock_nanosleep_time64", + {"arm64", 408}: "timer_gettime64", + {"arm64", 409}: "timer_settime64", + {"arm64", 410}: "timerfd_gettime64", + {"arm64", 411}: "timerfd_settime64", + {"arm64", 412}: "utimensat_time64", + {"arm64", 413}: "pselect6_time64", + {"arm64", 414}: "ppoll_time64", + {"arm64", 416}: "io_pgetevents_time64", + {"arm64", 417}: "recvmmsg_time64", + {"arm64", 418}: "mq_timedsend_time64", + {"arm64", 419}: "mq_timedreceive_time64", + {"arm64", 420}: "semtimedop_time64", + {"arm64", 421}: "rt_sigtimedwait_time64", + {"arm64", 422}: "futex_time64", + {"arm64", 423}: "sched_rr_get_interval_time64", + {"arm64", 424}: "pidfd_send_signal", + {"arm64", 425}: "io_uring_setup", + {"arm64", 426}: "io_uring_enter", + {"arm64", 427}: "io_uring_register", + {"arm64", 428}: "open_tree", + {"arm64", 429}: "move_mount", + {"arm64", 430}: "fsopen", + {"arm64", 431}: "fsconfig", + {"arm64", 432}: "fsmount", + {"arm64", 433}: "fspick", + {"arm64", 434}: "pidfd_open", + {"arm64", 435}: "clone3", + {"arm64", 436}: "close_range", + {"arm64", 437}: "openat2", + {"arm64", 438}: "pidfd_getfd", + {"arm64", 439}: "faccessat2", + {"arm64", 440}: "process_madvise", + {"arm64", 441}: "epoll_pwait2", + {"arm64", 442}: "mount_setattr", + {"arm64", 443}: "quotactl_fd", + {"arm64", 444}: "landlock_create_ruleset", + {"arm64", 445}: "landlock_add_rule", + {"arm64", 446}: "landlock_restrict_self", + {"arm64", 447}: "memfd_secret", + {"arm64", 448}: "process_mrelease", + {"arm64", 449}: "futex_waitv", + {"arm64", 450}: "set_mempolicy_home_node", + {"arm64", 451}: "cachestat", + {"arm64", 452}: "fchmodat2", + {"arm64", 453}: "map_shadow_stack", + {"arm64", 454}: "futex_wake", + {"arm64", 455}: "futex_wait", + {"arm64", 456}: "futex_requeue", + {"arm64", 457}: "statmount", + {"arm64", 458}: "listmount", + {"arm64", 459}: "lsm_get_self_attr", + {"arm64", 460}: "lsm_set_self_attr", + {"arm64", 461}: "lsm_list_modules", + {"arm64", 462}: "mseal", +} diff --git a/pkg/serializer/go.mod b/pkg/serializer/go.mod index bbe11b5b863be..0d3f6ac44d82a 100644 --- a/pkg/serializer/go.mod +++ b/pkg/serializer/go.mod @@ -23,7 +23,9 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/logs => ../config/logs/ github.com/DataDog/datadog-agent/pkg/config/mock => ../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../config/setup/ + 
github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ../config/utils/ github.com/DataDog/datadog-agent/pkg/metrics => ../metrics/ github.com/DataDog/datadog-agent/pkg/obfuscate => ../obfuscate @@ -75,7 +77,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.14.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 github.com/gogo/protobuf v1.3.2 github.com/json-iterator/go v1.1.12 github.com/protocolbuffers/protoscope v0.0.0-20221109213918-8e7a6aafa2c9 @@ -94,7 +96,9 @@ require ( github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/orchestrator/model v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 // indirect @@ -175,13 +179,13 @@ require ( go.uber.org/fx v1.22.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.28.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.29.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/tools v0.25.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/serializer/go.sum b/pkg/serializer/go.sum index d952c59d01660..fdd38534f8e0a 100644 --- a/pkg/serializer/go.sum +++ b/pkg/serializer/go.sum @@ -4,10 +4,10 @@ github.com/DataDog/agent-payload/v5 v5.0.114 h1:qg3jfzz2/lOFKbFOw2yM6RM8eyMs4HlE github.com/DataDog/agent-payload/v5 v5.0.114/go.mod h1:COngtbYYCncpIPiE5D93QlXDH/3VAKk10jDNwGHcMRE= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.14.0 h1:J0IEqkrB8BjtuDHofR8Q3J+Z8829Ja1Mlix9cyG8wJI= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.14.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.14.0 h1:QHx6B/VUx3rZQqrQNZI5BfypbhhGSRzCz05viyJEQmM= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.14.0/go.mod h1:q4c7zbmdnIdSJNZuBsveTk5ZeRkSkS2g6b8zzFF1mE4= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0 h1:e4XT2+v4vgZBCbp5JUbe0Z+PRegh+nsLMp4X+esht9E= 
+github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 h1:0OFAPO964qsj6BzKs/hbAkpO/IIHp7vN1klKrohzULA= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0/go.mod h1:IDaKpBfDtw8eWBLtXR14HB5dsXDxS4VRUR0OL5rlRT8= github.com/DataDog/sketches-go v1.4.4 h1:dF52vzXRFSPOj2IjXSWLvXq3jubL4CI69kwYjJ1w5Z8= github.com/DataDog/sketches-go v1.4.4/go.mod h1:XR0ns2RtEEF09mDKXiKZiQg+nfZStrq1ZuL1eezeZe0= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= @@ -322,8 +322,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -331,8 +331,8 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -346,8 +346,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -375,12 +375,12 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -397,8 +397,8 @@ golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/pkg/serializer/internal/metrics/origin_mapping.go b/pkg/serializer/internal/metrics/origin_mapping.go index c6d21f843d807..8e362c78c99ad 100644 --- a/pkg/serializer/internal/metrics/origin_mapping.go +++ b/pkg/serializer/internal/metrics/origin_mapping.go @@ -18,6 +18,8 @@ func metricSourceToOriginCategory(ms metrics.MetricSource) int32 { return 10 case metrics.MetricSourceJmxCustom, metrics.MetricSourceActivemq, + metrics.MetricSourceAnyscale, + metrics.MetricSourceAppgateSDP, metrics.MetricSourceCassandra, metrics.MetricSourceConfluentPlatform, metrics.MetricSourceHazelcast, @@ -41,6 +43,7 @@ func metricSourceToOriginCategory(ms metrics.MetricSource) int32 { metrics.MetricSourceNtp, 
metrics.MetricSourceSystemd, metrics.MetricSourceHelm, + metrics.MetricSourceKubeflow, metrics.MetricSourceKubernetesAPIServer, metrics.MetricSourceKubernetesStateCore, metrics.MetricSourceOrchestrator, @@ -215,13 +218,13 @@ func metricSourceToOriginCategory(ms metrics.MetricSource) int32 { metrics.MetricSourceKubeAPIserverMetrics, metrics.MetricSourceKubeControllerManager, metrics.MetricSourceKubeDNS, - metrics.MetricSourceKubeflow, metrics.MetricSourceKubeMetricsServer, metrics.MetricSourceKubeProxy, metrics.MetricSourceKubeScheduler, metrics.MetricSourceKubelet, metrics.MetricSourceKubernetesState, metrics.MetricSourceKyototycoon, + metrics.MetricSourceKyverno, metrics.MetricSourceLighttpd, metrics.MetricSourceLinkerd, metrics.MetricSourceLinuxProcExtras, @@ -262,6 +265,7 @@ func metricSourceToOriginCategory(ms metrics.MetricSource) int32 { metrics.MetricSourceScylla, metrics.MetricSourceSilk, metrics.MetricSourceSinglestore, + metrics.MetricSourceSlurm, metrics.MetricSourceSnowflake, metrics.MetricSourceSpark, metrics.MetricSourceSqlserver, @@ -274,6 +278,7 @@ func metricSourceToOriginCategory(ms metrics.MetricSource) int32 { metrics.MetricSourceTCPCheck, metrics.MetricSourceTeamcity, metrics.MetricSourceTeradata, + metrics.MetricSourceTibcoEMS, metrics.MetricSourceTLS, metrics.MetricSourceTokumx, metrics.MetricSourceTrafficServer, @@ -851,8 +856,18 @@ func metricSourceToOriginService(ms metrics.MetricSource) int32 { return 412 case metrics.MetricSourceAwsNeuron: return 413 + case metrics.MetricSourceAnyscale: + return 414 + case metrics.MetricSourceAppgateSDP: + return 415 case metrics.MetricSourceKubeflow: return 416 + case metrics.MetricSourceSlurm: + return 417 + case metrics.MetricSourceKyverno: + return 418 + case metrics.MetricSourceTibcoEMS: + return 419 default: return 0 } diff --git a/pkg/serverless/apikey/api_key.go b/pkg/serverless/apikey/api_key.go index f5a969dac1c7e..883123e9db92e 100644 --- a/pkg/serverless/apikey/api_key.go +++ b/pkg/serverless/apikey/api_key.go @@ -19,7 +19,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/kms" "github.com/aws/aws-sdk-go-v2/service/secretsmanager" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" datadogHttp "github.com/DataDog/datadog-agent/pkg/util/http" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -95,7 +95,7 @@ func readAPIKeyFromKMS(cipherText string) (string, error) { cfg, err := awsconfig.LoadDefaultConfig( context.TODO(), awsconfig.WithHTTPClient(&http.Client{ - Transport: datadogHttp.CreateHTTPTransport(config.Datadog()), + Transport: datadogHttp.CreateHTTPTransport(pkgconfigsetup.Datadog()), }), ) if err != nil { @@ -125,7 +125,7 @@ func readAPIKeyFromSecretsManager(arn string) (string, error) { cfg, err := awsconfig.LoadDefaultConfig(context.TODO(), awsconfig.WithHTTPClient(&http.Client{ - Transport: datadogHttp.CreateHTTPTransport(config.Datadog()), + Transport: datadogHttp.CreateHTTPTransport(pkgconfigsetup.Datadog()), }), awsconfig.WithRegion(region), ) diff --git a/pkg/serverless/apikey/env.go b/pkg/serverless/apikey/env.go index a8c08a4ffb714..a14c5a19c780c 100644 --- a/pkg/serverless/apikey/env.go +++ b/pkg/serverless/apikey/env.go @@ -9,7 +9,7 @@ import ( "os" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -87,7 +87,7 @@ func HandleEnv() error { // Validate that an API key has been set, either by DD_API_KEY or 
read from KMS or Secrets Manager // --------------------------- - if !config.Datadog().IsSet("api_key") { + if !pkgconfigsetup.Datadog().IsSet("api_key") { // we're not reporting the error to AWS because we don't want the function // execution to be stopped. TODO(remy): discuss with AWS if there is way // of reporting non-critical init errors. diff --git a/pkg/serverless/daemon/daemon_test.go b/pkg/serverless/daemon/daemon_test.go index dc44e72597b5d..9ed4d8a7d93ec 100644 --- a/pkg/serverless/daemon/daemon_test.go +++ b/pkg/serverless/daemon/daemon_test.go @@ -11,6 +11,7 @@ import ( "os" "reflect" "runtime" + "strconv" "sync" "testing" "time" @@ -169,6 +170,7 @@ func TestSetTraceTagOk(t *testing.T) { "key0": "value0", } t.Setenv("DD_API_KEY", "x") + t.Setenv("DD_RECEIVER_PORT", strconv.Itoa(testutil.FreeTCPPort(t))) agent := trace.StartServerlessTraceAgent(true, &trace.LoadConfig{Path: "/does-not-exist.yml"}, make(chan *pb.Span), random.Random.Uint64()) defer agent.Stop() d := Daemon{ diff --git a/pkg/serverless/daemon/routes.go b/pkg/serverless/daemon/routes.go index ff6bc20a68c8d..733b4a49b050c 100644 --- a/pkg/serverless/daemon/routes.go +++ b/pkg/serverless/daemon/routes.go @@ -78,6 +78,9 @@ func (s *StartInvocation) ServeHTTP(w http.ResponseWriter, r *http.Request) { log.Debug("a context has been found, sending the context to the tracer") w.Header().Set(invocationlifecycle.TraceIDHeader, fmt.Sprintf("%v", s.daemon.InvocationProcessor.GetExecutionInfo().TraceID)) w.Header().Set(invocationlifecycle.SamplingPriorityHeader, fmt.Sprintf("%v", s.daemon.InvocationProcessor.GetExecutionInfo().SamplingPriority)) + if s.daemon.InvocationProcessor.GetExecutionInfo().TraceIDUpper64Hex != "" { + w.Header().Set(invocationlifecycle.TraceTagsHeader, fmt.Sprintf("%s=%s", invocationlifecycle.Upper64BitsTag, s.daemon.InvocationProcessor.GetExecutionInfo().TraceIDUpper64Hex)) + } } } diff --git a/pkg/serverless/invocationlifecycle/constants.go b/pkg/serverless/invocationlifecycle/constants.go index 28b885d662b22..9e4c824762dc8 100644 --- a/pkg/serverless/invocationlifecycle/constants.go +++ b/pkg/serverless/invocationlifecycle/constants.go @@ -35,6 +35,12 @@ const ( // SamplingPriorityHeader is the header containing the sampling priority for execution and/or inferred spans SamplingPriorityHeader = "x-datadog-sampling-priority" + // TraceTagsHeader is the header containing trace tags, e.g. 
the upper 64 bits tag + TraceTagsHeader = "x-datadog-tags" + + // Upper64BitsTag is the tag for the upper 64 bits of the trace ID, if it exists + Upper64BitsTag = "_dd.p.tid" + // Lambda function trigger span tag values apiGateway = "api-gateway" applicationLoadBalancer = "application-load-balancer" @@ -47,4 +53,5 @@ const ( sns = "sns" sqs = "sqs" functionURL = "lambda-function-url" + stepFunction = "step-function" ) diff --git a/pkg/serverless/invocationlifecycle/init.go b/pkg/serverless/invocationlifecycle/init.go index 8ee37838ac08e..e572696ae7d2f 100644 --- a/pkg/serverless/invocationlifecycle/init.go +++ b/pkg/serverless/invocationlifecycle/init.go @@ -106,6 +106,10 @@ func (lp *LifecycleProcessor) initFromDynamoDBStreamEvent(event events.DynamoDBE } func (lp *LifecycleProcessor) initFromEventBridgeEvent(event events.EventBridgeEvent) { + if !lp.DetectLambdaLibrary() && lp.InferredSpansEnabled { + lp.GetInferredSpan().EnrichInferredSpanWithEventBridgeEvent(event) + } + lp.requestHandler.event = event lp.addTag(tagFunctionTriggerEventSource, eventBridge) lp.addTag(tagFunctionTriggerEventSourceArn, event.Source) @@ -139,6 +143,14 @@ func (lp *LifecycleProcessor) initFromSNSEvent(event events.SNSEvent) { lp.requestHandler.event = event lp.addTag(tagFunctionTriggerEventSource, sns) lp.addTag(tagFunctionTriggerEventSourceArn, trigger.ExtractSNSEventArn(event)) + + // Check for EventBridge event wrapped by the SNS message + var eventBridgeEvent events.EventBridgeEvent + if err := json.Unmarshal([]byte(event.Records[0].SNS.Message), &eventBridgeEvent); err == nil { + if len(eventBridgeEvent.Detail.TraceContext) > 0 { + lp.createWrappedEventBridgeSpan(eventBridgeEvent) + } + } } func (lp *LifecycleProcessor) initFromSQSEvent(event events.SQSEvent) { @@ -150,19 +162,26 @@ func (lp *LifecycleProcessor) initFromSQSEvent(event events.SQSEvent) { lp.addTag(tagFunctionTriggerEventSource, sqs) lp.addTag(tagFunctionTriggerEventSourceArn, trigger.ExtractSQSEventARN(event)) - // test for SNS + // Check for SNS event wrapped by the SQS body var snsEntity events.SNSEntity - if err := json.Unmarshal([]byte(event.Records[0].Body), &snsEntity); err != nil { - return + if err := json.Unmarshal([]byte(event.Records[0].Body), &snsEntity); err == nil { + if strings.ToLower(snsEntity.Type) == "notification" && snsEntity.TopicArn != "" { + lp.createWrappedSNSSpan(snsEntity) + return + } } - isSNS := strings.ToLower(snsEntity.Type) == "notification" && snsEntity.TopicArn != "" - - if !isSNS { - return + // Check for EventBridge event wrapped by the SQS body + var eventBridgeEvent events.EventBridgeEvent + if err := json.Unmarshal([]byte(event.Records[0].Body), &eventBridgeEvent); err == nil { + if len(eventBridgeEvent.Detail.TraceContext) > 0 { + lp.createWrappedEventBridgeSpan(eventBridgeEvent) + } } +} - // sns span +// createWrappedSNSSpan creates an inferred span for SNS that is wrapped by SQS. 
+func (lp *LifecycleProcessor) createWrappedSNSSpan(snsEntity events.SNSEntity) { lp.requestHandler.inferredSpans[1] = &inferredspan.InferredSpan{ CurrentInvocationStartTime: time.Unix(lp.requestHandler.inferredSpans[0].Span.Start, 0), Span: &pb.Span{ @@ -175,9 +194,21 @@ func (lp *LifecycleProcessor) initFromSQSEvent(event events.SQSEvent) { snsEvent.Records[0].SNS = snsEntity lp.requestHandler.inferredSpans[1].EnrichInferredSpanWithSNSEvent(snsEvent) - lp.requestHandler.inferredSpans[1].Span.Duration = lp.GetInferredSpan().Span.Start - lp.requestHandler.inferredSpans[1].Span.Start +} +// createWrappedEventBridgeSpan creates an inferred span for EventBridge +// that is wrapped by SQS or SNS. +func (lp *LifecycleProcessor) createWrappedEventBridgeSpan(eventBridgeEvent events.EventBridgeEvent) { + lp.requestHandler.inferredSpans[1] = &inferredspan.InferredSpan{ + CurrentInvocationStartTime: time.Unix(lp.requestHandler.inferredSpans[0].Span.Start, 0), + Span: &pb.Span{ + SpanID: inferredspan.GenerateSpanId(), + }, + } + + lp.requestHandler.inferredSpans[1].EnrichInferredSpanWithEventBridgeEvent(eventBridgeEvent) + lp.requestHandler.inferredSpans[1].Span.Duration = lp.GetInferredSpan().Span.Start - lp.requestHandler.inferredSpans[1].Span.Start } func (lp *LifecycleProcessor) initFromLambdaFunctionURLEvent(event events.LambdaFunctionURLRequest, region string, accountID string, functionName string) { @@ -189,3 +220,7 @@ func (lp *LifecycleProcessor) initFromLambdaFunctionURLEvent(event events.Lambda lp.addTag(tagFunctionTriggerEventSourceArn, fmt.Sprintf("arn:aws:lambda:%v:%v:url:%v", region, accountID, functionName)) lp.addTags(trigger.GetTagsFromLambdaFunctionURLRequest(event)) } + +func (lp *LifecycleProcessor) initFromStepFunctionPayload(event events.StepFunctionPayload) { + lp.requestHandler.event = event +} diff --git a/pkg/serverless/invocationlifecycle/lifecycle.go b/pkg/serverless/invocationlifecycle/lifecycle.go index 90e931767cef1..2c210efdc5e08 100644 --- a/pkg/serverless/invocationlifecycle/lifecycle.go +++ b/pkg/serverless/invocationlifecycle/lifecycle.go @@ -94,7 +94,6 @@ func (lp *LifecycleProcessor) OnInvokeStart(startDetails *InvocationStartDetails if err != nil { log.Debugf("[lifecycle] Failed to parse event payload: %v", err) } - eventType := trigger.GetEventType(lowercaseEventPayload) if eventType == trigger.Unknown { log.Debugf("[lifecycle] Failed to extract event type") @@ -230,6 +229,22 @@ func (lp *LifecycleProcessor) OnInvokeStart(startDetails *InvocationStartDetails } ev = event lp.initFromLambdaFunctionURLEvent(event, region, account, resource) + case trigger.LegacyStepFunctionEvent: + var event events.StepFunctionEvent + if err := json.Unmarshal(payloadBytes, &event); err != nil { + log.Debugf("Failed to unmarshal %s event: %s", stepFunction, err) + break + } + ev = event.Payload + lp.initFromStepFunctionPayload(event.Payload) + case trigger.StepFunctionEvent: + var eventPayload events.StepFunctionPayload + if err := json.Unmarshal(payloadBytes, &eventPayload); err != nil { + log.Debugf("Failed to unmarshal %s event: %s", stepFunction, err) + break + } + ev = eventPayload + lp.initFromStepFunctionPayload(eventPayload) default: log.Debug("Skipping adding trigger types and inferred spans as a non-supported payload was received.") } diff --git a/pkg/serverless/invocationlifecycle/lifecycle_test.go b/pkg/serverless/invocationlifecycle/lifecycle_test.go index 1eceb0d9ccf19..d4799305701c7 100644 --- a/pkg/serverless/invocationlifecycle/lifecycle_test.go +++ 
b/pkg/serverless/invocationlifecycle/lifecycle_test.go @@ -122,6 +122,78 @@ func TestStartExecutionSpanWithLambdaLibrary(t *testing.T) { assert.Equal(t, startInvocationTime, testProcessor.GetExecutionInfo().startTime) } +func TestStartExecutionSpanStepFunctionEvent(t *testing.T) { + extraTags := &logs.Tags{ + Tags: []string{"functionname:test-function"}, + } + demux := createDemultiplexer(t) + mockProcessTrace := func(*api.Payload) {} + mockDetectLambdaLibrary := func() bool { return false } + + eventPayload := `{"Execution":{"Id":"arn:aws:states:us-east-1:425362996713:execution:agocsTestSF:bc9f281c-3daa-4e5a-9a60-471a3810bf44","Input":{},"StartTime":"2024-07-30T19:55:52.976Z","Name":"bc9f281c-3daa-4e5a-9a60-471a3810bf44","RoleArn":"arn:aws:iam::425362996713:role/test-serverless-stepfunctions-dev-AgocsTestSFRole-tRkeFXScjyk4","RedriveCount":0},"StateMachine":{"Id":"arn:aws:states:us-east-1:425362996713:stateMachine:agocsTestSF","Name":"agocsTestSF"},"State":{"Name":"agocsTest1","EnteredTime":"2024-07-30T19:55:53.018Z","RetryCount":0}}` + startInvocationTime := time.Now() + startDetails := InvocationStartDetails{ + StartTime: startInvocationTime, + InvokeEventRawPayload: []byte(eventPayload), + InvokedFunctionARN: "arn:aws:lambda:us-east-1:123456789012:function:my-function", + } + + testProcessor := LifecycleProcessor{ + ExtraTags: extraTags, + ProcessTrace: mockProcessTrace, + DetectLambdaLibrary: mockDetectLambdaLibrary, + Demux: demux, + } + + testProcessor.OnInvokeStart(&startDetails) + + assert.NotNil(t, testProcessor.GetExecutionInfo()) + + assert.Equal(t, uint64(0), testProcessor.GetExecutionInfo().SpanID) + assert.Equal(t, uint64(5744042798732701615), testProcessor.GetExecutionInfo().TraceID) + assert.Equal(t, uint64(2902498116043018663), testProcessor.GetExecutionInfo().parentID) + assert.Equal(t, sampler.SamplingPriority(1), testProcessor.GetExecutionInfo().SamplingPriority) + upper64 := testProcessor.GetExecutionInfo().TraceIDUpper64Hex + assert.Equal(t, "1914fe7789eb32be", upper64) + assert.Equal(t, startInvocationTime, testProcessor.GetExecutionInfo().startTime) +} + +func TestLegacyLambdaStartExecutionSpanStepFunctionEvent(t *testing.T) { + extraTags := &logs.Tags{ + Tags: []string{"functionname:test-function"}, + } + demux := createDemultiplexer(t) + mockProcessTrace := func(*api.Payload) {} + mockDetectLambdaLibrary := func() bool { return false } + + eventPayload := `{"Payload":{"Execution":{"Id":"arn:aws:states:us-east-1:425362996713:execution:agocsTestSF:bc9f281c-3daa-4e5a-9a60-471a3810bf44","Input":{},"StartTime":"2024-07-30T19:55:52.976Z","Name":"bc9f281c-3daa-4e5a-9a60-471a3810bf44","RoleArn":"arn:aws:iam::425362996713:role/test-serverless-stepfunctions-dev-AgocsTestSFRole-tRkeFXScjyk4","RedriveCount":0},"StateMachine":{"Id":"arn:aws:states:us-east-1:425362996713:stateMachine:agocsTestSF","Name":"agocsTestSF"},"State":{"Name":"agocsTest1","EnteredTime":"2024-07-30T19:55:53.018Z","RetryCount":0}}}` + startInvocationTime := time.Now() + startDetails := InvocationStartDetails{ + StartTime: startInvocationTime, + InvokeEventRawPayload: []byte(eventPayload), + InvokedFunctionARN: "arn:aws:lambda:us-east-1:123456789012:function:my-function", + } + + testProcessor := LifecycleProcessor{ + ExtraTags: extraTags, + ProcessTrace: mockProcessTrace, + DetectLambdaLibrary: mockDetectLambdaLibrary, + Demux: demux, + } + + testProcessor.OnInvokeStart(&startDetails) + + assert.NotNil(t, testProcessor.GetExecutionInfo()) + + assert.Equal(t, uint64(0), 
testProcessor.GetExecutionInfo().SpanID) + assert.Equal(t, uint64(5744042798732701615), testProcessor.GetExecutionInfo().TraceID) + assert.Equal(t, uint64(2902498116043018663), testProcessor.GetExecutionInfo().parentID) + assert.Equal(t, sampler.SamplingPriority(1), testProcessor.GetExecutionInfo().SamplingPriority) + upper64 := testProcessor.GetExecutionInfo().TraceIDUpper64Hex + assert.Equal(t, "1914fe7789eb32be", upper64) + assert.Equal(t, startInvocationTime, testProcessor.GetExecutionInfo().startTime) +} + func TestEndExecutionSpanNoLambdaLibrary(t *testing.T) { t.Setenv(functionNameEnvVar, "TestFunction") @@ -1162,6 +1234,88 @@ func TestTriggerTypesLifecycleEventForEventBridge(t *testing.T) { }, testProcessor.GetTags()) } +func TestTriggerTypesLifecycleEventForEventBridgeSQS(t *testing.T) { + startInvocationTime := time.Now() + duration := 1 * time.Second + endInvocationTime := startInvocationTime.Add(duration) + + var tracePayload *api.Payload + + startDetails := &InvocationStartDetails{ + InvokeEventRawPayload: getEventFromFile("eventbridgesqs.json"), + InvokedFunctionARN: "arn:aws:lambda:us-east-1:123456789012:function:my-function", + StartTime: startInvocationTime, + } + + testProcessor := &LifecycleProcessor{ + DetectLambdaLibrary: func() bool { return false }, + ProcessTrace: func(payload *api.Payload) { tracePayload = payload }, + InferredSpansEnabled: true, + requestHandler: &RequestHandler{ + executionInfo: &ExecutionStartInfo{ + TraceID: 123, + SamplingPriority: 1, + }, + }, + } + + testProcessor.OnInvokeStart(startDetails) + testProcessor.OnInvokeEnd(&InvocationEndDetails{ + RequestID: "test-request-id", + EndTime: endInvocationTime, + IsError: false, + }) + + spans := tracePayload.TracerPayload.Chunks[0].Spans + assert.Equal(t, 3, len(spans)) + eventBridgeSpan, sqsSpan := spans[1], spans[2] + assert.Equal(t, "eventbridge", eventBridgeSpan.Service) + assert.Equal(t, "test-bus", eventBridgeSpan.Resource) + assert.Equal(t, "sqs", sqsSpan.Service) + assert.Equal(t, "test-queue", sqsSpan.Resource) +} + +func TestTriggerTypesLifecycleEventForEventBridgeSNS(t *testing.T) { + startInvocationTime := time.Now() + duration := 1 * time.Second + endInvocationTime := startInvocationTime.Add(duration) + + var tracePayload *api.Payload + + startDetails := &InvocationStartDetails{ + InvokeEventRawPayload: getEventFromFile("eventbridgesns.json"), + InvokedFunctionARN: "arn:aws:lambda:us-east-1:123456789012:function:my-function", + StartTime: startInvocationTime, + } + + testProcessor := &LifecycleProcessor{ + DetectLambdaLibrary: func() bool { return false }, + ProcessTrace: func(payload *api.Payload) { tracePayload = payload }, + InferredSpansEnabled: true, + requestHandler: &RequestHandler{ + executionInfo: &ExecutionStartInfo{ + TraceID: 123, + SamplingPriority: 1, + }, + }, + } + + testProcessor.OnInvokeStart(startDetails) + testProcessor.OnInvokeEnd(&InvocationEndDetails{ + RequestID: "test-request-id", + EndTime: endInvocationTime, + IsError: false, + }) + + spans := tracePayload.TracerPayload.Chunks[0].Spans + assert.Equal(t, 3, len(spans)) + eventBridgeSpan, snsSpan := spans[1], spans[2] + assert.Equal(t, "eventbridge", eventBridgeSpan.Service) + assert.Equal(t, "test-bus", eventBridgeSpan.Resource) + assert.Equal(t, "sns", snsSpan.Service) + assert.Equal(t, "test-notifier", snsSpan.Resource) +} + // Helper function for reading test file func getEventFromFile(filename string) []byte { event, err := os.ReadFile("../trace/testdata/event_samples/" + filename) diff --git 
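Note on the trace.go changes that follow: they add a TraceIDUpper64Hex field to ExecutionStartInfo and propagate it as the `_dd.p.tid` trace tag (and back to the tracer through the `x-datadog-tags` header set in routes.go above), which is how the upper half of a 128-bit trace ID travels while the numeric TraceID field keeps the lower 64 bits. A minimal standalone sketch of that encoding, using values from the Step Functions test above; the helper names here are illustrative and not part of the diff:

```go
package main

import (
	"fmt"
	"strconv"
)

// upper64Tag renders the upper 64 bits of a 128-bit trace ID as the
// "_dd.p.tid" tag value: 16 lowercase hex digits. Illustrative helper only.
func upper64Tag(upper uint64) string {
	return fmt.Sprintf("_dd.p.tid=%016x", upper)
}

// parseUpper64 reads the hex form back into a uint64.
func parseUpper64(hexTag string) (uint64, error) {
	return strconv.ParseUint(hexTag, 16, 64)
}

func main() {
	// From the test above: lower 64 bits are 5744042798732701615 (kept in
	// TraceID) and the upper 64 bits are "1914fe7789eb32be".
	fmt.Println(upper64Tag(0x1914fe7789eb32be)) // _dd.p.tid=1914fe7789eb32be
	v, _ := parseUpper64("1914fe7789eb32be")
	fmt.Printf("%d\n", v)
}
```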
a/pkg/serverless/invocationlifecycle/trace.go b/pkg/serverless/invocationlifecycle/trace.go index de7f69eabb688..f45eeaeef8929 100644 --- a/pkg/serverless/invocationlifecycle/trace.go +++ b/pkg/serverless/invocationlifecycle/trace.go @@ -16,7 +16,7 @@ import ( json "github.com/json-iterator/go" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/random" "github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan" @@ -34,12 +34,13 @@ var /* const */ runtimeRegex = regexp.MustCompile(`^(dotnet|go|java|ruby)(\d+(\. // ExecutionStartInfo is saved information from when an execution span was started type ExecutionStartInfo struct { - startTime time.Time - TraceID uint64 - SpanID uint64 - parentID uint64 - requestPayload []byte - SamplingPriority sampler.SamplingPriority + startTime time.Time + TraceID uint64 + TraceIDUpper64Hex string + SpanID uint64 + parentID uint64 + requestPayload []byte + SamplingPriority sampler.SamplingPriority } // startExecutionSpan records information from the start of the invocation. @@ -63,6 +64,12 @@ func (lp *LifecycleProcessor) startExecutionSpan(event interface{}, rawPayload [ inferredSpan.Span.TraceID = traceContext.TraceID inferredSpan.Span.ParentID = traceContext.ParentID } + if traceContext.TraceIDUpper64Hex != "" { + executionContext.TraceIDUpper64Hex = traceContext.TraceIDUpper64Hex + lp.requestHandler.SetMetaTag(Upper64BitsTag, traceContext.TraceIDUpper64Hex) + } else { + delete(lp.requestHandler.triggerTags, Upper64BitsTag) + } } else { executionContext.TraceID = 0 executionContext.parentID = 0 @@ -112,9 +119,9 @@ func (lp *LifecycleProcessor) endExecutionSpan(endDetails *InvocationEndDetails) if len(langMatches) >= 2 { executionSpan.Meta["language"] = langMatches[1] } - captureLambdaPayloadEnabled := config.Datadog().GetBool("capture_lambda_payload") + captureLambdaPayloadEnabled := pkgconfigsetup.Datadog().GetBool("capture_lambda_payload") if captureLambdaPayloadEnabled { - capturePayloadMaxDepth := config.Datadog().GetInt("capture_lambda_payload_max_depth") + capturePayloadMaxDepth := pkgconfigsetup.Datadog().GetInt("capture_lambda_payload_max_depth") requestPayloadJSON := make(map[string]interface{}) if err := json.Unmarshal(executionContext.requestPayload, &requestPayloadJSON); err != nil { log.Debugf("[lifecycle] Failed to parse request payload: %v", err) diff --git a/pkg/serverless/invocationlifecycle/trace_test.go b/pkg/serverless/invocationlifecycle/trace_test.go index 6b45d32755165..2dda265a0479b 100644 --- a/pkg/serverless/invocationlifecycle/trace_test.go +++ b/pkg/serverless/invocationlifecycle/trace_test.go @@ -101,6 +101,21 @@ func TestStartExecutionSpan(t *testing.T) { reqHeadersWithCtx.Set("x-datadog-sampling-priority", "3") reqHeadersWithCtx.Set("traceparent", "00-00000000000000000000000000000006-0000000000000006-01") + stepFunctionEvent := events.StepFunctionPayload{ + Execution: struct { + ID string + }{ + ID: "arn:aws:states:us-east-1:425362996713:execution:agocsTestSF:aa6c9316-713a-41d4-9c30-61131716744f", + }, + State: struct { + Name string + EnteredTime string + }{ + Name: "agocsTest1", + EnteredTime: "2024-07-30T20:46:20.824Z", + }, + } + testcases := []struct { name string event interface{} @@ -315,6 +330,20 @@ func TestStartExecutionSpan(t *testing.T) { SamplingPriority: sampler.SamplingPriority(1), }, }, + { + name: "step function event", + event: 
stepFunctionEvent, + payload: payloadWithoutCtx, + reqHeaders: reqHeadersWithoutCtx, + infSpanEnabled: false, + propStyle: "datadog", + expectCtx: &ExecutionStartInfo{ + TraceID: 5377636026938777059, + TraceIDUpper64Hex: "6fb5c3a05c73dbfe", + parentID: 8947638978974359093, + SamplingPriority: 1, + }, + }, } for _, tc := range testcases { @@ -333,6 +362,7 @@ func TestStartExecutionSpan(t *testing.T) { requestHandler: &RequestHandler{ executionInfo: actualCtx, inferredSpans: [2]*inferredspan.InferredSpan{inferredSpan}, + triggerTags: make(map[string]string), }, } startDetails := &InvocationStartDetails{ @@ -697,6 +727,71 @@ func TestEndExecutionSpanWithTimeout(t *testing.T) { assert.Equal(t, "Datadog detected an Impending Timeout", executionSpan.Meta["error.msg"]) } +func TestEndExecutionSpanWithStepFunctions(t *testing.T) { + t.Setenv(functionNameEnvVar, "TestFunction") + currentExecutionInfo := &ExecutionStartInfo{} + lp := &LifecycleProcessor{ + requestHandler: &RequestHandler{ + executionInfo: currentExecutionInfo, + triggerTags: make(map[string]string), + }, + } + + lp.requestHandler.triggerTags["_dd.p.tid"] = "6fb5c3a05c73dbfe" + + startTime := time.Now() + startDetails := &InvocationStartDetails{ + StartTime: startTime, + InvokeEventHeaders: http.Header{}, + } + + stepFunctionEvent := events.StepFunctionPayload{ + Execution: struct{ ID string }(struct { + ID string `json:"id"` + }{ + ID: "arn:aws:states:us-east-1:425362996713:execution:agocsTestSF:aa6c9316-713a-41d4-9c30-61131716744f", + }), + State: struct { + Name string + EnteredTime string + }{ + Name: "agocsTest1", + EnteredTime: "2024-07-30T20:46:20.824Z", + }, + } + + lp.startExecutionSpan(stepFunctionEvent, []byte("[]"), startDetails) + + assert.Equal(t, uint64(5377636026938777059), currentExecutionInfo.TraceID) + assert.Equal(t, uint64(8947638978974359093), currentExecutionInfo.parentID) + assert.Equal(t, "6fb5c3a05c73dbfe", lp.requestHandler.triggerTags["_dd.p.tid"]) + + duration := 1 * time.Second + endTime := startTime.Add(duration) + + endDetails := &InvocationEndDetails{ + EndTime: endTime, + IsError: false, + RequestID: "test-request-id", + ResponseRawPayload: []byte(`{"response":"test response payload"}`), + ColdStart: true, + ProactiveInit: false, + Runtime: "dotnet6", + } + executionSpan := lp.endExecutionSpan(endDetails) + + assert.Equal(t, "aws.lambda", executionSpan.Name) + assert.Equal(t, "aws.lambda", executionSpan.Service) + assert.Equal(t, "TestFunction", executionSpan.Resource) + assert.Equal(t, "serverless", executionSpan.Type) + assert.Equal(t, currentExecutionInfo.TraceID, executionSpan.TraceID) + assert.Equal(t, currentExecutionInfo.SpanID, executionSpan.SpanID) + assert.Equal(t, startTime.UnixNano(), executionSpan.Start) + assert.Equal(t, duration.Nanoseconds(), executionSpan.Duration) + assert.Equal(t, "6fb5c3a05c73dbfe", executionSpan.Meta["_dd.p.tid"]) + +} + func TestParseLambdaPayload(t *testing.T) { assert.Equal(t, []byte(""), ParseLambdaPayload([]byte(""))) assert.Equal(t, []byte("{}"), ParseLambdaPayload([]byte("{}"))) diff --git a/pkg/serverless/metrics/enhanced_metrics.go b/pkg/serverless/metrics/enhanced_metrics.go index c7dd8e62bcd22..7320267a2f78f 100644 --- a/pkg/serverless/metrics/enhanced_metrics.go +++ b/pkg/serverless/metrics/enhanced_metrics.go @@ -61,6 +61,10 @@ const ( totalNetworkMetric = "aws.lambda.enhanced.total_network" tmpUsedMetric = "aws.lambda.enhanced.tmp_used" tmpMaxMetric = "aws.lambda.enhanced.tmp_max" + fdMaxMetric = "aws.lambda.enhanced.fd_max" + fdUseMetric = 
"aws.lambda.enhanced.fd_use" + threadsMaxMetric = "aws.lambda.enhanced.threads_max" + threadsUseMetric = "aws.lambda.enhanced.threads_use" enhancedMetricsEnvVar = "DD_ENHANCED_METRICS" // Bottlecap @@ -564,6 +568,140 @@ func SendTmpEnhancedMetrics(sendMetrics chan bool, tags []string, metricAgent *S } +type generateFdEnhancedMetricsArgs struct { + FdMax float64 + FdUse float64 + Tags []string + Demux aggregator.Demultiplexer + Time float64 +} + +type generateThreadEnhancedMetricsArgs struct { + ThreadsMax float64 + ThreadsUse float64 + Tags []string + Demux aggregator.Demultiplexer + Time float64 +} + +// generateFdEnhancedMetrics generates enhanced metrics for the maximum number of file descriptors available and in use +func generateFdEnhancedMetrics(args generateFdEnhancedMetricsArgs) { + args.Demux.AggregateSample(metrics.MetricSample{ + Name: fdMaxMetric, + Value: args.FdMax, + Mtype: metrics.DistributionType, + Tags: args.Tags, + SampleRate: 1, + Timestamp: args.Time, + }) + args.Demux.AggregateSample(metrics.MetricSample{ + Name: fdUseMetric, + Value: args.FdUse, + Mtype: metrics.DistributionType, + Tags: args.Tags, + SampleRate: 1, + Timestamp: args.Time, + }) +} + +// generateThreadEnhancedMetrics generates enhanced metrics for the maximum number of threads available and in use +func generateThreadEnhancedMetrics(args generateThreadEnhancedMetricsArgs) { + args.Demux.AggregateSample(metrics.MetricSample{ + Name: threadsMaxMetric, + Value: args.ThreadsMax, + Mtype: metrics.DistributionType, + Tags: args.Tags, + SampleRate: 1, + Timestamp: args.Time, + }) + args.Demux.AggregateSample(metrics.MetricSample{ + Name: threadsUseMetric, + Value: args.ThreadsUse, + Mtype: metrics.DistributionType, + Tags: args.Tags, + SampleRate: 1, + Timestamp: args.Time, + }) +} + +func SendProcessEnhancedMetrics(sendMetrics chan bool, tags []string, metricAgent *ServerlessMetricAgent) { + if enhancedMetricsDisabled { + return + } + + pids := proc.GetPidList(proc.ProcPath) + + fdMaxData, err := proc.GetFileDescriptorMaxData(pids) + if err != nil { + log.Debug("Could not emit file descriptor enhanced metrics. %v", err) + return + } + + fdUseData, err := proc.GetFileDescriptorUseData(pids) + if err != nil { + log.Debugf("Could not emit file descriptor enhanced metrics. %v", err) + return + } + + threadsMaxData, err := proc.GetThreadsMaxData(pids) + if err != nil { + log.Debugf("Could not emit thread enhanced metrics. %v", err) + return + } + + threadsUseData, err := proc.GetThreadsUseData(pids) + if err != nil { + log.Debugf("Could not emit thread enhanced metrics. 
%v", err) + return + } + + fdMax := fdMaxData.MaximumFileHandles + fdUse := fdUseData.UseFileHandles + threadsMax := threadsMaxData.ThreadsMax + threadsUse := threadsUseData.ThreadsUse + + ticker := time.NewTicker(1 * time.Millisecond) + defer ticker.Stop() + for { + select { + case _, open := <-sendMetrics: + if !open { + generateFdEnhancedMetrics(generateFdEnhancedMetricsArgs{ + FdMax: fdMax, + FdUse: fdUse, + Tags: tags, + Demux: metricAgent.Demux, + Time: float64(time.Now().UnixNano()) / float64(time.Second), + }) + generateThreadEnhancedMetrics(generateThreadEnhancedMetricsArgs{ + ThreadsMax: threadsMax, + ThreadsUse: threadsUse, + Tags: tags, + Demux: metricAgent.Demux, + Time: float64(time.Now().UnixNano()) / float64(time.Second), + }) + return + } + case <-ticker.C: + pids := proc.GetPidList(proc.ProcPath) + + fdUseData, err := proc.GetFileDescriptorUseData(pids) + if err == nil { + fdUse = math.Max(fdUse, fdUseData.UseFileHandles) + } else { + log.Debugf("Could not update file descriptor use enhanced metric. %v", err) + } + + threadsUseData, err := proc.GetThreadsUseData(pids) + if err == nil { + threadsUse = math.Max(threadsUse, threadsUseData.ThreadsUse) + } else { + log.Debugf("Could not update thread use enhanced metric. %v", err) + } + } + } +} + // incrementEnhancedMetric sends an enhanced metric with a value of 1 to the metrics channel func incrementEnhancedMetric(name string, tags []string, timestamp float64, demux aggregator.Demultiplexer, force bool) { // TODO - pass config here, instead of directly looking up var diff --git a/pkg/serverless/metrics/enhanced_metrics_test.go b/pkg/serverless/metrics/enhanced_metrics_test.go index 6281b1326180f..81de716f01226 100644 --- a/pkg/serverless/metrics/enhanced_metrics_test.go +++ b/pkg/serverless/metrics/enhanced_metrics_test.go @@ -730,6 +730,100 @@ func TestSendTmpEnhancedMetricsDisabled(t *testing.T) { enhancedMetricsDisabled = false } +func TestSendFdEnhancedMetrics(t *testing.T) { + demux := createDemultiplexer(t) + tags := []string{"functionname:test-function"} + now := float64(time.Now().UnixNano()) / float64(time.Second) + args := generateFdEnhancedMetricsArgs{ + FdMax: 1024, + FdUse: 26, + Tags: tags, + Demux: demux, + Time: now, + } + go generateFdEnhancedMetrics(args) + generatedMetrics, timedMetrics := demux.WaitForNumberOfSamples(3, 0, 100*time.Millisecond) + assert.Equal(t, []metrics.MetricSample{ + { + Name: fdMaxMetric, + Value: 1024, + Mtype: metrics.DistributionType, + Tags: tags, + SampleRate: 1, + Timestamp: now, + }, + { + Name: fdUseMetric, + Value: 26, + Mtype: metrics.DistributionType, + Tags: tags, + SampleRate: 1, + Timestamp: now, + }, + }, + generatedMetrics, + ) + assert.Len(t, timedMetrics, 0) +} + +func TestSendThreadEnhancedMetrics(t *testing.T) { + demux := createDemultiplexer(t) + tags := []string{"functionname:test-function"} + now := float64(time.Now().UnixNano()) / float64(time.Second) + args := generateThreadEnhancedMetricsArgs{ + ThreadsMax: 1024, + ThreadsUse: 41, + Tags: tags, + Demux: demux, + Time: now, + } + go generateThreadEnhancedMetrics(args) + generatedMetrics, timedMetrics := demux.WaitForNumberOfSamples(3, 0, 100*time.Millisecond) + assert.Equal(t, []metrics.MetricSample{ + { + Name: threadsMaxMetric, + Value: 1024, + Mtype: metrics.DistributionType, + Tags: tags, + SampleRate: 1, + Timestamp: now, + }, + { + Name: threadsUseMetric, + Value: 41, + Mtype: metrics.DistributionType, + Tags: tags, + SampleRate: 1, + Timestamp: now, + }, + }, + generatedMetrics, + ) + assert.Len(t, 
timedMetrics, 0) +} + +func TestSendProcessEnhancedMetricsDisabled(t *testing.T) { + var wg sync.WaitGroup + enhancedMetricsDisabled = true + demux := createDemultiplexer(t) + metricAgent := ServerlessMetricAgent{Demux: demux} + tags := []string{"functionname:test-function"} + + wg.Add(1) + go func() { + defer wg.Done() + SendProcessEnhancedMetrics(make(chan bool), tags, &metricAgent) + }() + + generatedMetrics, timedMetrics := demux.WaitForNumberOfSamples(1, 0, 100*time.Millisecond) + + assert.Len(t, generatedMetrics, 0) + assert.Len(t, timedMetrics, 0) + + wg.Wait() + enhancedMetricsDisabled = false +} + func TestSendFailoverReasonMetric(t *testing.T) { demux := createDemultiplexer(t) tags := []string{"reason:test-reason"} diff --git a/pkg/serverless/metrics/metric.go b/pkg/serverless/metrics/metric.go index 72ac20ba15d59..9732db4e76c8f 100644 --- a/pkg/serverless/metrics/metric.go +++ b/pkg/serverless/metrics/metric.go @@ -12,8 +12,8 @@ import ( dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" "github.com/DataDog/datadog-agent/pkg/aggregator" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -52,7 +52,7 @@ const ( // GetMultipleEndpoints returns the api keys per domain specified in the main agent config func (m *MetricConfig) GetMultipleEndpoints() (map[string][]string, error) { - return utils.GetMultipleEndpoints(config.Datadog()) + return utils.GetMultipleEndpoints(pkgconfigsetup.Datadog()) } // NewServer returns a running DogStatsD server @@ -64,17 +64,17 @@ func (m *MetricDogStatsD) NewServer(demux aggregator.Demultiplexer) (dogstatsdSe func (c *ServerlessMetricAgent) Start(forwarderTimeout time.Duration, multipleEndpointConfig MultipleEndpointConfig, dogstatFactory DogStatsDFactory) { // prevents any UDP packets from being stuck in the buffer and not parsed during the current invocation // by setting this option to 1ms, all packets received will directly be sent to the parser - config.Datadog().Set("dogstatsd_packet_buffer_flush_timeout", 1*time.Millisecond, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("dogstatsd_packet_buffer_flush_timeout", 1*time.Millisecond, model.SourceAgentRuntime) // the invocation metric is also generated by Lambda Layers // we want to avoid duplicate metric - customerList := config.Datadog().GetStringSlice(statsDMetricBlocklistKey) + customerList := pkgconfigsetup.Datadog().GetStringSlice(statsDMetricBlocklistKey) // if the proxy is enabled we need to also block the errorMetric if strings.ToLower(os.Getenv(proxyEnabledEnvVar)) == "true" { - config.Datadog().Set(statsDMetricBlocklistKey, buildMetricBlocklistForProxy(customerList), model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set(statsDMetricBlocklistKey, buildMetricBlocklistForProxy(customerList), model.SourceAgentRuntime) } else { - config.Datadog().Set(statsDMetricBlocklistKey, buildMetricBlocklist(customerList), model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set(statsDMetricBlocklistKey, buildMetricBlocklist(customerList), model.SourceAgentRuntime) } demux := buildDemultiplexer(multipleEndpointConfig, forwarderTimeout) diff --git a/pkg/serverless/metrics/metric_test.go b/pkg/serverless/metrics/metric_test.go index 2b07a3e07f00d..9f7abdc5d8b75 100644 --- a/pkg/serverless/metrics/metric_test.go +++ b/pkg/serverless/metrics/metric_test.go 
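SendProcessEnhancedMetrics, added in enhanced_metrics.go above, samples /proc on a 1 ms ticker and emits the peak file-descriptor and thread usage as distributions once its channel closes. The daemon wiring is not part of this diff; the sketch below is a hypothetical caller, compiled against the agent module, showing how the helper could be driven for one invocation (the wrapper name is an assumption):

```go
package example

import (
	serverlessMetrics "github.com/DataDog/datadog-agent/pkg/serverless/metrics"
)

// runWithProcessMetrics is a hypothetical wrapper (not part of the diff):
// start the sampler before the handler runs, close the channel afterwards so
// the peak fd/thread usage observed during the invocation is flushed.
func runWithProcessMetrics(agent *serverlessMetrics.ServerlessMetricAgent, tags []string, invoke func()) {
	stop := make(chan bool)
	done := make(chan struct{})
	go func() {
		defer close(done)
		serverlessMetrics.SendProcessEnhancedMetrics(stop, tags, agent)
	}()

	invoke()

	close(stop) // emits aws.lambda.enhanced.fd_max/fd_use and threads_max/threads_use
	<-done      // wait for the final samples to be aggregated
}
```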
@@ -23,7 +23,7 @@ import ( dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" "github.com/DataDog/datadog-agent/pkg/aggregator" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/hostname" ) @@ -39,7 +39,7 @@ func TestStartDoesNotBlock(t *testing.T) { if os.Getenv("CI") == "true" && runtime.GOOS == "darwin" && runtime.GOARCH == "amd64" { t.Skip("TestStartDoesNotBlock is known to fail on the macOS Gitlab runners because of the already running Agent") } - config.LoadWithoutSecret() + pkgconfigsetup.LoadWithoutSecret(pkgconfigsetup.Datadog(), nil) metricAgent := &ServerlessMetricAgent{ SketchesBucketOffset: time.Second * 10, } @@ -89,9 +89,9 @@ func TestStartInvalidDogStatsD(t *testing.T) { func TestStartWithProxy(t *testing.T) { t.SkipNow() - originalValues := config.Datadog().GetStringSlice(statsDMetricBlocklistKey) - defer config.Datadog().SetWithoutSource(statsDMetricBlocklistKey, originalValues) - config.Datadog().SetWithoutSource(statsDMetricBlocklistKey, []string{}) + originalValues := pkgconfigsetup.Datadog().GetStringSlice(statsDMetricBlocklistKey) + defer pkgconfigsetup.Datadog().SetWithoutSource(statsDMetricBlocklistKey, originalValues) + pkgconfigsetup.Datadog().SetWithoutSource(statsDMetricBlocklistKey, []string{}) t.Setenv(proxyEnabledEnvVar, "true") @@ -106,7 +106,7 @@ func TestStartWithProxy(t *testing.T) { ErrorsMetric, } - setValues := config.Datadog().GetStringSlice(statsDMetricBlocklistKey) + setValues := pkgconfigsetup.Datadog().GetStringSlice(statsDMetricBlocklistKey) assert.Equal(t, expected, setValues) } @@ -208,7 +208,7 @@ func getAvailableUDPPort() (int, error) { func TestRaceFlushVersusParsePacket(t *testing.T) { port, err := getAvailableUDPPort() require.NoError(t, err) - config.Datadog().SetDefault("dogstatsd_port", port) + pkgconfigsetup.Datadog().SetDefault("dogstatsd_port", port) demux := aggregator.InitAndStartServerlessDemultiplexer(nil, time.Second*1000) @@ -216,7 +216,7 @@ func TestRaceFlushVersusParsePacket(t *testing.T) { require.NoError(t, err, "cannot start DSD") defer s.Stop() - url := fmt.Sprintf("127.0.0.1:%d", config.Datadog().GetInt("dogstatsd_port")) + url := fmt.Sprintf("127.0.0.1:%d", pkgconfigsetup.Datadog().GetInt("dogstatsd_port")) conn, err := net.Dial("udp", url) require.NoError(t, err, "cannot connect to DSD socket") defer conn.Close() diff --git a/pkg/serverless/otlp/otlp.go b/pkg/serverless/otlp/otlp.go index bf03f7ca40b29..fcb44c14d3c47 100644 --- a/pkg/serverless/otlp/otlp.go +++ b/pkg/serverless/otlp/otlp.go @@ -16,7 +16,7 @@ import ( "go.opentelemetry.io/collector/otelcol" coreOtlp "github.com/DataDog/datadog-agent/comp/otelcol/otlp" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -29,7 +29,7 @@ type ServerlessOTLPAgent struct { // NewServerlessOTLPAgent creates a new ServerlessOTLPAgent with the correct // otel pipeline. 
func NewServerlessOTLPAgent(serializer serializer.MetricSerializer) *ServerlessOTLPAgent { - pipeline, err := coreOtlp.NewPipelineFromAgentConfig(config.Datadog(), serializer, nil, nil) + pipeline, err := coreOtlp.NewPipelineFromAgentConfig(pkgconfigsetup.Datadog(), serializer, nil, nil) if err != nil { log.Error("Error creating new otlp pipeline:", err) return nil @@ -61,7 +61,7 @@ func (o *ServerlessOTLPAgent) Stop() { // IsEnabled returns true if the OTLP endpoint should be enabled. func IsEnabled() bool { - return coreOtlp.IsEnabled(config.Datadog()) + return coreOtlp.IsEnabled(pkgconfigsetup.Datadog()) } var ( diff --git a/pkg/serverless/proc/proc.go b/pkg/serverless/proc/proc.go index 76b2af63f3337..2cb4e297f47be 100644 --- a/pkg/serverless/proc/proc.go +++ b/pkg/serverless/proc/proc.go @@ -7,10 +7,12 @@ package proc import ( + "bufio" "bytes" "errors" "fmt" "io" + "math" "os" "strconv" "strings" @@ -22,10 +24,14 @@ const ( ProcStatPath = "/proc/stat" ProcUptimePath = "/proc/uptime" ProcNetDevPath = "/proc/net/dev" + ProcPath = "/proc" + PidLimitsPathFormat = "/%d/limits" + PidFdPathFormat = "/%d/fd" + PidTaskPathFormat = "/%d/task" lambdaNetworkInterface = "vinternal_1" ) -func getPidList(procPath string) []int { +func GetPidList(procPath string) []int { files, err := os.ReadDir(procPath) pids := []int{} if err != nil { @@ -69,7 +75,7 @@ func getEnvVariablesFromPid(procPath string, pid int) map[string]string { // it returns a slice since a value could be found in more than one process func SearchProcsForEnvVariable(procPath string, envName string) []string { result := []string{} - pidList := getPidList(procPath) + pidList := GetPidList(procPath) for _, pid := range pidList { envMap := getEnvVariablesFromPid(procPath, pid) if value, ok := envMap[envName]; ok { @@ -196,3 +202,162 @@ func getNetworkData(path string) (*NetworkData, error) { } } + +type FileDescriptorMaxData struct { + MaximumFileHandles float64 +} + +// GetFileDescriptorMaxData returns the maximum limit of file descriptors the function can use +func GetFileDescriptorMaxData(pids []int) (*FileDescriptorMaxData, error) { + return getFileDescriptorMaxData(ProcPath, pids) +} + +func getFileDescriptorMaxData(path string, pids []int) (*FileDescriptorMaxData, error) { + fdMax := math.Inf(1) + + for _, pid := range pids { + limitsPath := fmt.Sprint(path + fmt.Sprintf(PidLimitsPathFormat, pid)) + file, err := os.Open(limitsPath) + if err != nil { + return nil, err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "Max open files") { + fields := strings.Fields(line) + if len(fields) < 6 { + log.Debugf("file descriptor max data not found in file '%s'", limitsPath) + break + } + + fdMaxPidStr := fields[3] + fdMaxPid, err := strconv.Atoi(fdMaxPidStr) + if err != nil { + log.Debugf("file descriptor max data not found in file '%s'", limitsPath) + break + } + + fdMax = math.Min(float64(fdMax), float64(fdMaxPid)) + break + } + } + } + + if fdMax != math.Inf(1) { + return &FileDescriptorMaxData{ + MaximumFileHandles: fdMax, + }, nil + } + + return nil, fmt.Errorf("file descriptor max data not found") +} + +type FileDescriptorUseData struct { + UseFileHandles float64 +} + +// GetFileDescriptorUseData returns the maximum number of file descriptors the function has used at a time +func GetFileDescriptorUseData(pids []int) (*FileDescriptorUseData, error) { + return getFileDescriptorUseData(ProcPath, pids) +} + +func getFileDescriptorUseData(path string, 
pids []int) (*FileDescriptorUseData, error) { + fdUse := 0 + + for _, pid := range pids { + fdPath := fmt.Sprint(path + fmt.Sprintf(PidFdPathFormat, pid)) + files, err := os.ReadDir(fdPath) + if err != nil { + return nil, fmt.Errorf("file descriptor use data not found in file '%s'", fdPath) + } + fdUse += len(files) + } + + return &FileDescriptorUseData{ + UseFileHandles: float64(fdUse), + }, nil +} + +type ThreadsMaxData struct { + ThreadsMax float64 +} + +// GetThreadsMaxData returns the maximum limit of threads the function can use +func GetThreadsMaxData(pids []int) (*ThreadsMaxData, error) { + return getThreadsMaxData(ProcPath, pids) +} + +func getThreadsMaxData(path string, pids []int) (*ThreadsMaxData, error) { + threadsMax := math.Inf(1) + + for _, pid := range pids { + limitsPath := fmt.Sprint(path + fmt.Sprintf(PidLimitsPathFormat, pid)) + file, err := os.Open(limitsPath) + if err != nil { + return nil, err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "Max processes") { + fields := strings.Fields(line) + if len(fields) < 5 { + log.Debugf("threads max data not found in file '%s'", limitsPath) + break + } + + threadsMaxPidStr := fields[2] + threadsMaxPid, err := strconv.Atoi(threadsMaxPidStr) + if err != nil { + log.Debugf("file descriptor max data not found in file '%s'", limitsPath) + break + } + + threadsMax = math.Min(float64(threadsMax), float64(threadsMaxPid)) + break + } + } + } + + if threadsMax != math.Inf(1) { + return &ThreadsMaxData{ + ThreadsMax: threadsMax, + }, nil + } + + return nil, fmt.Errorf("threads max data not found") +} + +type ThreadsUseData struct { + ThreadsUse float64 +} + +// GetThreadsUseData returns the maximum number of threads the function has used at a time +func GetThreadsUseData(pids []int) (*ThreadsUseData, error) { + return getThreadsUseData(ProcPath, pids) +} + +func getThreadsUseData(path string, pids []int) (*ThreadsUseData, error) { + threadCount := 0 + for _, pid := range pids { + taskPath := fmt.Sprint(path + fmt.Sprintf(PidTaskPathFormat, pid)) + files, err := os.ReadDir(taskPath) + if err != nil { + return nil, fmt.Errorf("threads use data not found in directory '%s'", taskPath) + } + for _, file := range files { + if file.IsDir() { + threadCount++ + } + } + } + + return &ThreadsUseData{ + ThreadsUse: float64(threadCount), + }, nil +} diff --git a/pkg/serverless/proc/proc_test.go b/pkg/serverless/proc/proc_test.go index 1f08cd1e0f9f5..a43e700ed7112 100644 --- a/pkg/serverless/proc/proc_test.go +++ b/pkg/serverless/proc/proc_test.go @@ -15,12 +15,12 @@ import ( ) func TestGetPidListInvalid(t *testing.T) { - pids := getPidList("/incorrect/folder") + pids := GetPidList("/incorrect/folder") assert.Equal(t, 0, len(pids)) } func TestGetPidListValid(t *testing.T) { - pids := getPidList("./testData") + pids := GetPidList("./testData") sort.Ints(pids) assert.Equal(t, 2, len(pids)) assert.Equal(t, 13, pids[0]) @@ -143,3 +143,71 @@ func TestGetNetworkData(t *testing.T) { assert.NotNil(t, err) assert.Nil(t, networkData) } + +func TestGetFileDescriptorMaxData(t *testing.T) { + path := "./testData/process/valid" + pids := GetPidList(path) + fileDescriptorMaxData, err := getFileDescriptorMaxData(path, pids) + assert.Nil(t, err) + assert.Equal(t, float64(1024), fileDescriptorMaxData.MaximumFileHandles) + + path = "./testData/process/invalid_malformed" + pids = GetPidList(path) + fileDescriptorMaxData, err = getFileDescriptorMaxData(path, pids) + 
assert.NotNil(t, err) + assert.Nil(t, fileDescriptorMaxData) + + path = "./testData/process/invalid_missing" + pids = GetPidList(path) + fileDescriptorMaxData, err = getFileDescriptorMaxData(path, pids) + assert.NotNil(t, err) + assert.Nil(t, fileDescriptorMaxData) +} + +func TestGetFileDescriptorUseData(t *testing.T) { + path := "./testData/process/valid" + pids := GetPidList(path) + fileDescriptorUseData, err := getFileDescriptorUseData(path, pids) + assert.Nil(t, err) + assert.Equal(t, float64(5), fileDescriptorUseData.UseFileHandles) + + path = "./testData/process/invalid_missing" + pids = GetPidList(path) + fileDescriptorUseData, err = getFileDescriptorUseData(path, pids) + assert.NotNil(t, err) + assert.Nil(t, fileDescriptorUseData) +} + +func TestGetThreadsMaxData(t *testing.T) { + path := "./testData/process/valid" + pids := GetPidList(path) + threadsMaxData, err := getThreadsMaxData(path, pids) + assert.Nil(t, err) + assert.Equal(t, float64(1024), threadsMaxData.ThreadsMax) + + path = "./testData/process/invalid_malformed" + pids = GetPidList(path) + threadsMaxData, err = getThreadsMaxData(path, pids) + assert.NotNil(t, err) + assert.Nil(t, threadsMaxData) + + path = "./testData/process/invalid_missing" + pids = GetPidList(path) + threadsMaxData, err = getThreadsMaxData(path, pids) + assert.NotNil(t, err) + assert.Nil(t, threadsMaxData) +} + +func TestGetThreadsUseData(t *testing.T) { + path := "./testData/process/valid" + pids := GetPidList(path) + threadsUseData, err := getThreadsUseData(path, pids) + assert.Nil(t, err) + assert.Equal(t, float64(5), threadsUseData.ThreadsUse) + + path = "./testData/process/invalid_missing" + pids = GetPidList(path) + threadsUseData, err = getThreadsUseData(path, pids) + assert.NotNil(t, err) + assert.Nil(t, threadsUseData) +} diff --git a/pkg/serverless/proc/testData/process/invalid_malformed/31/limits b/pkg/serverless/proc/testData/process/invalid_malformed/31/limits new file mode 100644 index 0000000000000..2436ec08542cc --- /dev/null +++ b/pkg/serverless/proc/testData/process/invalid_malformed/31/limits @@ -0,0 +1,17 @@ +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size unlimited unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 1024 +Max open files 1024 1024 +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 4622 4622 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us diff --git a/pkg/serverless/proc/testData/process/invalid_malformed/9/limits b/pkg/serverless/proc/testData/process/invalid_malformed/9/limits new file mode 100644 index 0000000000000..2d25ac3010bd4 --- /dev/null +++ b/pkg/serverless/proc/testData/process/invalid_malformed/9/limits @@ -0,0 +1,17 @@ +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size unlimited unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 1024 1024 +Max open files 1024 +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 4622 4622 signals +Max msgqueue size 
819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us diff --git a/pkg/serverless/proc/testData/process/invalid_missing/31/limits b/pkg/serverless/proc/testData/process/invalid_missing/31/limits new file mode 100644 index 0000000000000..c7dc2c55dc689 --- /dev/null +++ b/pkg/serverless/proc/testData/process/invalid_missing/31/limits @@ -0,0 +1,15 @@ +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size unlimited unlimited bytes +Max resident set unlimited unlimited bytes +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 4622 4622 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us diff --git a/pkg/serverless/proc/testData/process/invalid_missing/9/limits b/pkg/serverless/proc/testData/process/invalid_missing/9/limits new file mode 100644 index 0000000000000..07de49ec476ae --- /dev/null +++ b/pkg/serverless/proc/testData/process/invalid_missing/9/limits @@ -0,0 +1,15 @@ +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size unlimited unlimited bytes +Max resident set unlimited unlimited bytes +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 4622 4622 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us diff --git a/test/e2e/containers/fake_datadog/app/__init__.py b/pkg/serverless/proc/testData/process/valid/31/fd/1 similarity index 100% rename from test/e2e/containers/fake_datadog/app/__init__.py rename to pkg/serverless/proc/testData/process/valid/31/fd/1 diff --git a/test/e2e/cws-tests/tests/lib/__init__.py b/pkg/serverless/proc/testData/process/valid/31/fd/2 similarity index 100% rename from test/e2e/cws-tests/tests/lib/__init__.py rename to pkg/serverless/proc/testData/process/valid/31/fd/2 diff --git a/pkg/serverless/proc/testData/process/valid/31/limits b/pkg/serverless/proc/testData/process/valid/31/limits new file mode 100644 index 0000000000000..664f04c884fad --- /dev/null +++ b/pkg/serverless/proc/testData/process/valid/31/limits @@ -0,0 +1,17 @@ +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size unlimited unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 1024 1024 processes +Max open files 1024 1024 files +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 4622 4622 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us diff --git a/pkg/serverless/proc/testData/process/valid/31/task/1/.gitkeep b/pkg/serverless/proc/testData/process/valid/31/task/1/.gitkeep new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git 
a/pkg/serverless/proc/testData/process/valid/31/task/2/.gitkeep b/pkg/serverless/proc/testData/process/valid/31/task/2/.gitkeep new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pkg/serverless/proc/testData/process/valid/31/task/3 b/pkg/serverless/proc/testData/process/valid/31/task/3 new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pkg/serverless/proc/testData/process/valid/9/fd/1 b/pkg/serverless/proc/testData/process/valid/9/fd/1 new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pkg/serverless/proc/testData/process/valid/9/fd/2 b/pkg/serverless/proc/testData/process/valid/9/fd/2 new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pkg/serverless/proc/testData/process/valid/9/fd/3 b/pkg/serverless/proc/testData/process/valid/9/fd/3 new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pkg/serverless/proc/testData/process/valid/9/limits b/pkg/serverless/proc/testData/process/valid/9/limits new file mode 100644 index 0000000000000..664f04c884fad --- /dev/null +++ b/pkg/serverless/proc/testData/process/valid/9/limits @@ -0,0 +1,17 @@ +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size unlimited unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 1024 1024 processes +Max open files 1024 1024 files +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 4622 4622 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us diff --git a/pkg/serverless/proc/testData/process/valid/9/task/1/.gitkeep b/pkg/serverless/proc/testData/process/valid/9/task/1/.gitkeep new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pkg/serverless/proc/testData/process/valid/9/task/2/.gitkeep b/pkg/serverless/proc/testData/process/valid/9/task/2/.gitkeep new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pkg/serverless/proc/testData/process/valid/9/task/3/.gitkeep b/pkg/serverless/proc/testData/process/valid/9/task/3/.gitkeep new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pkg/serverless/serverless.go b/pkg/serverless/serverless.go index 992ca51d55bda..b1d8c0f4c9b32 100644 --- a/pkg/serverless/serverless.go +++ b/pkg/serverless/serverless.go @@ -161,6 +161,8 @@ func callInvocationHandler(daemon *daemon.Daemon, arn string, deadlineMs int64, cpuOffsetData, cpuOffsetErr := proc.GetCPUData() uptimeOffset, uptimeOffsetErr := proc.GetUptime() networkOffsetData, networkOffsetErr := proc.GetNetworkData() + sendProcessMetrics := make(chan bool) + go metrics.SendProcessEnhancedMetrics(sendProcessMetrics, daemon.ExtraTags.Tags, daemon.MetricAgent) sendTmpMetrics := make(chan bool) go metrics.SendTmpEnhancedMetrics(sendTmpMetrics, daemon.ExtraTags.Tags, daemon.MetricAgent) @@ -179,16 +181,17 @@ func callInvocationHandler(daemon *daemon.Daemon, arn string, deadlineMs int64, case <-doneChannel: break } - sendSystemEnhancedMetrics(daemon, cpuOffsetErr == nil && uptimeOffsetErr == nil, networkOffsetErr == nil, uptimeOffset, cpuOffsetData, networkOffsetData, sendTmpMetrics) + sendSystemEnhancedMetrics(daemon, cpuOffsetErr == nil && uptimeOffsetErr == nil, networkOffsetErr == nil, uptimeOffset, cpuOffsetData, networkOffsetData, 
sendTmpMetrics, sendProcessMetrics) } -func sendSystemEnhancedMetrics(daemon *daemon.Daemon, emitCPUMetrics, emitNetworkMetrics bool, uptimeOffset float64, cpuOffsetData *proc.CPUData, networkOffsetData *proc.NetworkData, sendTmpMetrics chan bool) { +func sendSystemEnhancedMetrics(daemon *daemon.Daemon, emitCPUMetrics, emitNetworkMetrics bool, uptimeOffset float64, cpuOffsetData *proc.CPUData, networkOffsetData *proc.NetworkData, sendTmpMetrics chan bool, sendProcessMetrics chan bool) { if daemon.MetricAgent == nil { log.Debug("Could not send system enhanced metrics") return } close(sendTmpMetrics) + close(sendProcessMetrics) if emitCPUMetrics { metrics.SendCPUEnhancedMetrics(cpuOffsetData, uptimeOffset, daemon.ExtraTags.Tags, daemon.MetricAgent.Demux) diff --git a/pkg/serverless/trace/inferredspan/constants.go b/pkg/serverless/trace/inferredspan/constants.go index d48e12c6ccc07..4f677cef63d4b 100644 --- a/pkg/serverless/trace/inferredspan/constants.go +++ b/pkg/serverless/trace/inferredspan/constants.go @@ -14,6 +14,7 @@ const ( connectionID = "connection_id" detailType = "detail_type" endpoint = "endpoint" + eventBridgeTime = "x-datadog-start-time" eventID = "event_id" eventName = "event_name" eventSourceArn = "event_source_arn" @@ -35,6 +36,7 @@ const ( queueName = "queuename" receiptHandle = "receipt_handle" requestID = "request_id" + resourceName = "x-datadog-resource-name" resourceNames = "resource_names" senderID = "sender_id" sentTimestamp = "SentTimestamp" diff --git a/pkg/serverless/trace/inferredspan/inferred_span.go b/pkg/serverless/trace/inferredspan/inferred_span.go index a0c8811dc1106..9f4fc89862e76 100644 --- a/pkg/serverless/trace/inferredspan/inferred_span.go +++ b/pkg/serverless/trace/inferredspan/inferred_span.go @@ -13,7 +13,7 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/random" @@ -76,7 +76,7 @@ func FilterFunctionTags(input map[string]string) map[string]string { } // filter out DD_TAGS & DD_EXTRA_TAGS - ddTags := configUtils.GetConfiguredTags(config.Datadog(), false) + ddTags := configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), false) for _, tag := range ddTags { tagParts := strings.SplitN(tag, ":", 2) if len(tagParts) != 2 { @@ -125,7 +125,7 @@ func (inferredSpan *InferredSpan) GenerateInferredSpan(startTime time.Time) { // IsInferredSpansEnabled is used to determine if we need to // generate and enrich inferred spans for a particular invocation func IsInferredSpansEnabled() bool { - return config.Datadog().GetBool("serverless.trace_enabled") && config.Datadog().GetBool("serverless.trace_managed_services") + return pkgconfigsetup.Datadog().GetBool("serverless.trace_enabled") && pkgconfigsetup.Datadog().GetBool("serverless.trace_managed_services") } // AddTagToInferredSpan is used to add new tags to the inferred span in diff --git a/pkg/serverless/trace/inferredspan/span_enrichment.go b/pkg/serverless/trace/inferredspan/span_enrichment.go index c469418de69ea..0425f277fdc8b 100644 --- a/pkg/serverless/trace/inferredspan/span_enrichment.go +++ b/pkg/serverless/trace/inferredspan/span_enrichment.go @@ -11,7 +11,7 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" 
"github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -37,7 +37,7 @@ func CreateServiceMapping(val string) map[string]string { } func init() { - serviceMappingStr := config.Datadog().GetString("serverless.service_mapping") + serviceMappingStr := pkgconfigsetup.Datadog().GetString("serverless.service_mapping") serviceMapping = CreateServiceMapping(serviceMappingStr) } @@ -293,14 +293,27 @@ func (inferredSpan *InferredSpan) EnrichInferredSpanWithEventBridgeEvent(eventPa inferredSpan.IsAsync = true inferredSpan.Span.Name = "aws.eventbridge" inferredSpan.Span.Service = serviceName - inferredSpan.Span.Start = formatISOStartTime(eventPayload.StartTime) - inferredSpan.Span.Resource = source + inferredSpan.Span.Start = eventPayload.Time.UnixNano() // use as a backup if sent timestamp isn't passed by the tracer + inferredSpan.Span.Resource = "EventBridge" // use as a backup if bus name isn't passed by the tracer inferredSpan.Span.Type = "web" inferredSpan.Span.Meta = map[string]string{ operationName: "aws.eventbridge", resourceNames: source, detailType: eventPayload.DetailType, } + + if traceContext := eventPayload.Detail.TraceContext; traceContext != nil { + // The bus name isn't included in the default AWS payload, so we use + // `x-datadog-resource-name` from the tracer if it exists. + if bus, ok := traceContext[resourceName]; ok { + inferredSpan.Span.Resource = bus + } + + // Use the `x-datadog-start-time` from the tracer if it exists. + if startTime, ok := traceContext[eventBridgeTime]; ok { + inferredSpan.Span.Start = calculateStartTime(convertStringTimestamp(startTime)) + } + } } // EnrichInferredSpanWithKinesisEvent uses the parsed event diff --git a/pkg/serverless/trace/inferredspan/span_enrichment_test.go b/pkg/serverless/trace/inferredspan/span_enrichment_test.go index 8379c6bcf717c..b43bf9f5fac91 100644 --- a/pkg/serverless/trace/inferredspan/span_enrichment_test.go +++ b/pkg/serverless/trace/inferredspan/span_enrichment_test.go @@ -637,12 +637,10 @@ func TestEnrichInferredSpanWithEventBridgeEvent(t *testing.T) { inferredSpan := mockInferredSpan() inferredSpan.EnrichInferredSpanWithEventBridgeEvent(eventBridgeEvent) span := inferredSpan.Span - assert.Equal(t, uint64(7353030974370088224), span.TraceID) - assert.Equal(t, uint64(8048964810003407541), span.SpanID) - assert.Equal(t, formatISOStartTime("2017-12-22T18:43:48Z"), span.Start) + assert.Equal(t, int64(100*1e6), span.Start) assert.Equal(t, "eventbridge", span.Service) assert.Equal(t, "aws.eventbridge", span.Name) - assert.Equal(t, "eventbridge.custom.event.sender", span.Resource) + assert.Equal(t, "testBus", span.Resource) assert.Equal(t, "web", span.Type) assert.Equal(t, "aws.eventbridge", span.Meta[operationName]) assert.Equal(t, "eventbridge.custom.event.sender", span.Meta[resourceNames]) @@ -650,6 +648,24 @@ func TestEnrichInferredSpanWithEventBridgeEvent(t *testing.T) { assert.True(t, inferredSpan.IsAsync) } +func TestEnrichInferredSpanWithEventBridgeEventNoBus(t *testing.T) { + var eventBridgeEvent events.EventBridgeEvent + _ = json.Unmarshal(getEventFromFile("eventbridge-no-bus.json"), &eventBridgeEvent) + inferredSpan := mockInferredSpan() + inferredSpan.EnrichInferredSpanWithEventBridgeEvent(eventBridgeEvent) + span := inferredSpan.Span + assert.Equal(t, "EventBridge", span.Resource) +} + +func TestEnrichInferredSpanWithEventBridgeEventNoSentTimestamp(t *testing.T) { + var eventBridgeEvent events.EventBridgeEvent + _ = 
json.Unmarshal(getEventFromFile("eventbridge-no-timestamp.json"), &eventBridgeEvent) + inferredSpan := mockInferredSpan() + inferredSpan.EnrichInferredSpanWithEventBridgeEvent(eventBridgeEvent) + span := inferredSpan.Span + assert.Equal(t, int64(1726505925*1e9), span.Start) +} + func TestRemapsAllInferredSpanServiceNamesFromEventBridgeEvent(t *testing.T) { // Store the original service mapping origServiceMapping := GetServiceMapping() diff --git a/pkg/serverless/trace/propagation/carriers.go b/pkg/serverless/trace/propagation/carriers.go index 8a30a557d1072..9cfa1255baabb 100644 --- a/pkg/serverless/trace/propagation/carriers.go +++ b/pkg/serverless/trace/propagation/carriers.go @@ -7,6 +7,7 @@ package propagation import ( + "crypto/sha256" "encoding/base64" "errors" "fmt" @@ -22,8 +23,8 @@ import ( ) const ( - awsTraceHeader = "AWSTraceHeader" - datadogSQSHeader = "_datadog" + awsTraceHeader = "AWSTraceHeader" + datadogTraceHeader = "_datadog" rootPrefix = "Root=" parentPrefix = "Parent=" @@ -36,16 +37,17 @@ const ( var rootRegex = regexp.MustCompile("Root=1-[0-9a-fA-F]{8}-00000000[0-9a-fA-F]{16}") var ( - errorAWSTraceHeaderMismatch = errors.New("AWSTraceHeader does not match expected regex") - errorAWSTraceHeaderEmpty = errors.New("AWSTraceHeader does not contain trace ID and parent ID") - errorStringNotFound = errors.New("String value not found in _datadog payload") - errorUnsupportedDataType = errors.New("Unsupported DataType in _datadog payload") - errorNoDDContextFound = errors.New("No Datadog trace context found") - errorUnsupportedPayloadType = errors.New("Unsupported type for _datadog payload") - errorUnsupportedTypeType = errors.New("Unsupported type in _datadog payload") - errorUnsupportedValueType = errors.New("Unsupported value type in _datadog payload") - errorUnsupportedTypeValue = errors.New("Unsupported Type in _datadog payload") - errorCouldNotUnmarshal = errors.New("Could not unmarshal the invocation event payload") + errorAWSTraceHeaderMismatch = errors.New("AWSTraceHeader does not match expected regex") + errorAWSTraceHeaderEmpty = errors.New("AWSTraceHeader does not contain trace ID and parent ID") + errorStringNotFound = errors.New("String value not found in _datadog payload") + errorUnsupportedDataType = errors.New("Unsupported DataType in _datadog payload") + errorNoDDContextFound = errors.New("No Datadog trace context found") + errorUnsupportedPayloadType = errors.New("Unsupported type for _datadog payload") + errorUnsupportedTypeType = errors.New("Unsupported type in _datadog payload") + errorUnsupportedValueType = errors.New("Unsupported value type in _datadog payload") + errorUnsupportedTypeValue = errors.New("Unsupported Type in _datadog payload") + errorCouldNotUnmarshal = errors.New("Could not unmarshal the invocation event payload") + errorNoStepFunctionContextFound = errors.New("no Step Function context found in Step Function event") ) // extractTraceContextfromAWSTraceHeader extracts trace context from the @@ -108,9 +110,20 @@ func extractTraceContextfromAWSTraceHeader(value string) (*TraceContext, error) // sqsMessageCarrier returns the tracer.TextMapReader used to extract trace // context from the events.SQSMessage type. 
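An aside before sqsMessageCarrier below: every carrier helper in this file ultimately normalizes a `_datadog` map into a tracer.TextMapReader. A minimal, self-contained sketch of that view (the header values are invented for illustration; the only assumption is dd-trace-go v1's TextMapCarrier, which this file already imports):

package main

import (
	"fmt"

	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
)

func main() {
	// Hypothetical _datadog payload, as carried in an SQS/SNS message
	// attribute or in an EventBridge detail._datadog object.
	traceContext := map[string]string{
		"x-datadog-trace-id":          "123456789",
		"x-datadog-parent-id":         "987654321",
		"x-datadog-sampling-priority": "1",
	}
	// The carrier helpers return this view; ForeachKey is how the
	// propagator walks the headers during extraction.
	carrier := tracer.TextMapCarrier(traceContext)
	_ = carrier.ForeachKey(func(k, v string) error {
		fmt.Printf("%s=%s\n", k, v)
		return nil
	})
}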
func sqsMessageCarrier(event events.SQSMessage) (tracer.TextMapReader, error) { - if attr, ok := event.MessageAttributes[datadogSQSHeader]; ok { + // Check if this is a normal SQS message + if attr, ok := event.MessageAttributes[datadogTraceHeader]; ok { return sqsMessageAttrCarrier(attr) } + + // Check if this is an EventBridge event sent through SQS + var eventBridgeEvent events.EventBridgeEvent + if err := json.Unmarshal([]byte(event.Body), &eventBridgeEvent); err == nil { + if len(eventBridgeEvent.Detail.TraceContext) > 0 { + return eventBridgeCarrier(eventBridgeEvent) + } + } + + // Check if this is an SNS event sent through SQS return snsSqsMessageCarrier(event) } @@ -162,7 +175,16 @@ func snsSqsMessageCarrier(event events.SQSMessage) (tracer.TextMapReader, error) // snsEntityCarrier returns the tracer.TextMapReader used to extract trace // context from the attributes of an events.SNSEntity type. func snsEntityCarrier(event events.SNSEntity) (tracer.TextMapReader, error) { - msgAttrs, ok := event.MessageAttributes[datadogSQSHeader] + // Check if this is an EventBridge event sent through SNS + var eventBridgeEvent events.EventBridgeEvent + if err := json.Unmarshal([]byte(event.Message), &eventBridgeEvent); err == nil { + if len(eventBridgeEvent.Detail.TraceContext) > 0 { + return eventBridgeCarrier(eventBridgeEvent) + } + } + + // If not, check if this is a regular SNS message with Datadog trace information + msgAttrs, ok := event.MessageAttributes[datadogTraceHeader] if !ok { return nil, errorNoDDContextFound } @@ -201,6 +223,16 @@ func snsEntityCarrier(event events.SNSEntity) (tracer.TextMapReader, error) { return carrier, nil } +// eventBridgeCarrier returns the tracer.TextMapReader used to extract trace +// context from the Detail field of an events.EventBridgeEvent +func eventBridgeCarrier(event events.EventBridgeEvent) (tracer.TextMapReader, error) { + traceContext := event.Detail.TraceContext + if len(traceContext) > 0 { + return tracer.TextMapCarrier(traceContext), nil + } + return nil, errorNoDDContextFound +} + type invocationPayload struct { Headers tracer.TextMapCarrier `json:"headers"` } @@ -220,3 +252,60 @@ func rawPayloadCarrier(rawPayload []byte) (tracer.TextMapReader, error) { func headersCarrier(hdrs map[string]string) (tracer.TextMapReader, error) { return tracer.TextMapCarrier(hdrs), nil } + +// extractTraceContextFromStepFunctionContext extracts the execution ARN, state name, and state entered time and uses them to generate Trace ID and Parent ID +// The logic is based on the trace context conversion in Logs To Traces, dd-trace-py, dd-trace-js, etc. 
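Backing up to the sqsMessageCarrier and snsEntityCarrier changes above: they detect an EventBridge event delivered through SQS or SNS by unmarshalling the message body and checking for a non-empty detail._datadog map. A self-contained sketch of that check, assuming nothing beyond the standard library (the struct mirrors the events.EventBridgeEvent type added in this diff, and the body is the same hypothetical payload used by the eventbridge-through-sqs test case added later in this diff):

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of the relevant fields of events.EventBridgeEvent.
type eventBridgeEvent struct {
	Detail struct {
		TraceContext map[string]string `json:"_datadog"`
	} `json:"detail"`
}

func main() {
	// Hypothetical SQS body written by an EventBridge rule with an SQS target.
	body := `{"detail":{"_datadog":{"x-datadog-trace-id":"123456789","x-datadog-parent-id":"987654321","x-datadog-sampling-priority":"1"}}}`

	var ev eventBridgeEvent
	if err := json.Unmarshal([]byte(body), &ev); err == nil && len(ev.Detail.TraceContext) > 0 {
		// This is the branch sqsMessageCarrier now takes before falling back
		// to the SNS-through-SQS and plain _datadog message-attribute paths.
		fmt.Println("EventBridge trace context:", ev.Detail.TraceContext)
		return
	}
	fmt.Println("no EventBridge trace context, try the other carriers")
}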
+func extractTraceContextFromStepFunctionContext(event events.StepFunctionPayload) (*TraceContext, error) { + tc := new(TraceContext) + + execArn := event.Execution.ID + stateName := event.State.Name + stateEnteredTime := event.State.EnteredTime + + if execArn == "" || stateName == "" || stateEnteredTime == "" { + return nil, errorNoStepFunctionContextFound + } + + lowerTraceID, upperTraceID := stringToDdTraceIDs(execArn) + parentID := stringToDdSpanID(execArn, stateName, stateEnteredTime) + + tc.TraceID = lowerTraceID + tc.TraceIDUpper64Hex = upperTraceID + tc.ParentID = parentID + tc.SamplingPriority = sampler.PriorityAutoKeep + return tc, nil +} + +// stringToDdSpanID hashes the Execution ARN, state name, and state entered time to generate a 64-bit span ID +func stringToDdSpanID(execArn string, stateName string, stateEnteredTime string) uint64 { + uniqueSpanString := fmt.Sprintf("%s#%s#%s", execArn, stateName, stateEnteredTime) + spanHash := sha256.Sum256([]byte(uniqueSpanString)) + parentID := getPositiveUInt64(spanHash[0:8]) + return parentID +} + +// stringToDdTraceIDs hashes an Execution ARN to generate the lower and upper 64 bits of a 128-bit trace ID +func stringToDdTraceIDs(toHash string) (uint64, string) { + hash := sha256.Sum256([]byte(toHash)) + lower64 := getPositiveUInt64(hash[8:16]) + upper64 := getHexEncodedString(getPositiveUInt64(hash[0:8])) + return lower64, upper64 +} + +// getPositiveUInt64 converts the first 8 bytes of a byte array to a positive uint64 +func getPositiveUInt64(hashBytes []byte) uint64 { + var result uint64 + for i := 0; i < 8; i++ { + result = (result << 8) + uint64(hashBytes[i]) + } + result &= ^uint64(1 << 63) // Ensure the highest bit is always 0 + if result == 0 { + return 1 + } + return result +} + +// getHexEncodedString returns the lowercase hexadecimal representation of a uint64 +func getHexEncodedString(toEncode uint64) string { + return fmt.Sprintf("%x", toEncode)
+} diff --git a/pkg/serverless/trace/propagation/carriers_test.go b/pkg/serverless/trace/propagation/carriers_test.go index f21226c088fa3..c58b294b74e39 100644 --- a/pkg/serverless/trace/propagation/carriers_test.go +++ b/pkg/serverless/trace/propagation/carriers_test.go @@ -250,6 +250,18 @@ func TestSnsEntityCarrier(t *testing.T) { expMap map[string]string expErr string }{ + { + name: "eventbridge-through-sns", + event: events.SNSEntity{ + Message: `{"detail":{"_datadog":{"x-datadog-trace-id":"123456789","x-datadog-parent-id":"987654321","x-datadog-sampling-priority":"1"}}}`, + }, + expMap: map[string]string{ + "x-datadog-trace-id": "123456789", + "x-datadog-parent-id": "987654321", + "x-datadog-sampling-priority": "1", + }, + expErr: "", + }, { name: "no-msg-attrs", event: events.SNSEntity{}, @@ -372,6 +384,72 @@ func TestSnsEntityCarrier(t *testing.T) { } } +func TestEventBridgeCarrier(t *testing.T) { + testcases := []struct { + name string + event events.EventBridgeEvent + expMap map[string]string + expErr string + }{ + { + name: "valid_trace_context", + event: events.EventBridgeEvent{ + Detail: struct { + TraceContext map[string]string `json:"_datadog"` + }{ + TraceContext: map[string]string{ + "x-datadog-trace-id": "123456789", + "x-datadog-parent-id": "987654321", + "x-datadog-sampling-priority": "1", + }, + }, + }, + expMap: map[string]string{ + "x-datadog-trace-id": "123456789", + "x-datadog-parent-id": "987654321", + "x-datadog-sampling-priority": "1", + }, + expErr: "", + }, + { + name: "missing_trace_context", + event: events.EventBridgeEvent{ + Detail: struct { + TraceContext map[string]string `json:"_datadog"` + }{ + TraceContext: map[string]string{}, + }, + }, + expMap: nil, + expErr: "No Datadog trace context found", + }, + { + name: "nil_trace_context", + event: events.EventBridgeEvent{ + Detail: struct { + TraceContext map[string]string `json:"_datadog"` + }{ + TraceContext: nil, + }, + }, + expMap: nil, + expErr: "No Datadog trace context found", + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + tm, err := eventBridgeCarrier(tc.event) + t.Logf("eventBridgeCarrier returned TextMapReader=%#v error=%#v", tm, err) + assert.Equal(t, tc.expErr != "", err != nil) + if tc.expErr != "" { + assert.ErrorContains(t, err, tc.expErr) + } + assert.Equal(t, tc.expMap, getMapFromCarrier(tm)) + }) + } +} + func TestExtractTraceContextfromAWSTraceHeader(t *testing.T) { ctx := func(trace, parent, priority uint64) *TraceContext { return &TraceContext{ @@ -623,6 +701,18 @@ func TestSqsMessageCarrier(t *testing.T) { expMap: headersMapAll, expErr: nil, }, + { + name: "eventbridge-through-sqs", + event: events.SQSMessage{ + Body: `{"detail":{"_datadog":{"x-datadog-trace-id":"123456789","x-datadog-parent-id":"987654321","x-datadog-sampling-priority":"1"}}}`, + }, + expMap: map[string]string{ + "x-datadog-trace-id": "123456789", + "x-datadog-parent-id": "987654321", + "x-datadog-sampling-priority": "1", + }, + expErr: nil, + }, } for _, tc := range testcases { @@ -735,3 +825,75 @@ func TestHeadersCarrier(t *testing.T) { }) } } + +func Test_stringToDdSpanId(t *testing.T) { + type args struct { + execArn string + stateName string + stateEnteredTime string + } + tests := []struct { + name string + args args + want uint64 + }{ + {"first Test Case", + args{ + "arn:aws:states:sa-east-1:601427271234:express:DatadogStateMachine:acaf1a67-336a-e854-1599-2a627eb2dd8a:c8baf081-31f1-464d-971f-70cb17d01111", + "step-one", + "2022-12-08T21:08:19.224Z", + }, + 
4340734536022949921, + }, + { + "second Test Case", + args{ + "arn:aws:states:sa-east-1:601427271234:express:DatadogStateMachine:acaf1a67-336a-e854-1599-2a627eb2dd8a:c8baf081-31f1-464d-971f-70cb17d01111", + "step-one", + "2022-12-08T21:08:19.224Y", + }, + 981693280319792699, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, stringToDdSpanID(tt.args.execArn, tt.args.stateName, tt.args.stateEnteredTime), "stringToDdSpanID(%v, %v, %v)", tt.args.execArn, tt.args.stateName, tt.args.stateEnteredTime) + }) + } +} + +func Test_stringToDdTraceIds(t *testing.T) { + type args struct { + toHash string + } + tests := []struct { + name string + args args + expectedLower64 uint64 + expectedUpper64Hex string + }{ + { + "first Test Case", + args{ + "arn:aws:states:sa-east-1:425362996713:stateMachine:MyStateMachine-b276uka1j", + }, + 1680583253837593461, + "60ee1db79e4803f8", + }, + { + "lifecycle_test.go TestStartExecutionSpanStepFunctionEvent test case", + args{ + "arn:aws:states:us-east-1:425362996713:execution:agocsTestSF:bc9f281c-3daa-4e5a-9a60-471a3810bf44", + }, + 5744042798732701615, + "1914fe7789eb32be", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, got1 := stringToDdTraceIDs(tt.args.toHash) + assert.Equalf(t, tt.expectedLower64, got, "stringToDdTraceIDs(%v)", tt.args.toHash) + assert.Equalf(t, tt.expectedUpper64Hex, got1, "stringToDdTraceIDs(%v)", tt.args.toHash) + }) + } +} diff --git a/pkg/serverless/trace/propagation/extractor.go b/pkg/serverless/trace/propagation/extractor.go index eb745f4f49175..d6c756ae1693f 100644 --- a/pkg/serverless/trace/propagation/extractor.go +++ b/pkg/serverless/trace/propagation/extractor.go @@ -45,9 +45,10 @@ type Extractor struct { // TraceContext stores the propagated trace context values. 
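Before the TraceContext struct below: the Test_stringToDdTraceIds table above fixes the expected IDs for given execution ARNs. As a sanity check, the derivation can be reproduced outside the package with nothing but crypto/sha256; this sketch mirrors getPositiveUInt64 and compares against the first value already listed in that table.

package main

import (
	"crypto/sha256"
	"fmt"
)

// positiveUint64 mirrors getPositiveUInt64: big-endian fold of 8 bytes with
// the sign bit cleared and zero mapped to one.
func positiveUint64(b []byte) uint64 {
	var r uint64
	for i := 0; i < 8; i++ {
		r = (r << 8) + uint64(b[i])
	}
	r &= ^uint64(1 << 63)
	if r == 0 {
		return 1
	}
	return r
}

func main() {
	arn := "arn:aws:states:sa-east-1:425362996713:stateMachine:MyStateMachine-b276uka1j"
	h := sha256.Sum256([]byte(arn))
	lower64 := positiveUint64(h[8:16])
	upper64Hex := fmt.Sprintf("%x", positiveUint64(h[0:8]))
	// Per the test table: 1680583253837593461 and "60ee1db79e4803f8".
	fmt.Println(lower64, upper64Hex)
}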
type TraceContext struct { - TraceID uint64 - ParentID uint64 - SamplingPriority sampler.SamplingPriority + TraceID uint64 + TraceIDUpper64Hex string + ParentID uint64 + SamplingPriority sampler.SamplingPriority } // TraceContextExtended stores the propagated trace context values plus other @@ -100,6 +101,8 @@ func (e Extractor) extract(event interface{}) (*TraceContext, error) { return nil, errorNoSNSRecordFound case events.SNSEntity: carrier, err = snsEntityCarrier(ev) + case events.EventBridgeEvent: + carrier, err = eventBridgeCarrier(ev) case events.APIGatewayProxyRequest: carrier, err = headersCarrier(ev.Headers) case events.APIGatewayV2HTTPRequest: @@ -112,6 +115,11 @@ func (e Extractor) extract(event interface{}) (*TraceContext, error) { carrier, err = headersCarrier(ev.Headers) case events.LambdaFunctionURLRequest: carrier, err = headersCarrier(ev.Headers) + case events.StepFunctionPayload: + tc, err := extractTraceContextFromStepFunctionContext(ev) + if err == nil { + return tc, nil + } default: err = errorUnsupportedExtractionType } diff --git a/pkg/serverless/trace/propagation/extractor_test.go b/pkg/serverless/trace/propagation/extractor_test.go index 5635f0714aaeb..8cdd07c6d9027 100644 --- a/pkg/serverless/trace/propagation/extractor_test.go +++ b/pkg/serverless/trace/propagation/extractor_test.go @@ -9,6 +9,7 @@ import ( "encoding/base64" "encoding/json" "errors" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" "net/http" "os" "testing" @@ -407,6 +408,72 @@ func TestExtractorExtract(t *testing.T) { expNoErr: true, }, + // events.EventBridgeEvent + { + name: "eventbridge-event-empty", + events: []interface{}{ + events.EventBridgeEvent{}, + }, + expCtx: nil, + expNoErr: false, + }, + { + name: "eventbridge-event-with-dd-headers", + events: []interface{}{ + events.EventBridgeEvent{ + Detail: struct { + TraceContext map[string]string `json:"_datadog"` + }{ + TraceContext: headersMapDD, + }, + }, + }, + expCtx: ddTraceContext, + expNoErr: true, + }, + { + name: "eventbridge-event-with-all-headers", + events: []interface{}{ + events.EventBridgeEvent{ + Detail: struct { + TraceContext map[string]string `json:"_datadog"` + }{ + TraceContext: headersMapAll, + }, + }, + }, + expCtx: ddTraceContext, + expNoErr: true, + }, + { + name: "eventbridge-event-with-w3c-headers", + events: []interface{}{ + events.EventBridgeEvent{ + Detail: struct { + TraceContext map[string]string `json:"_datadog"` + }{ + TraceContext: headersMapW3C, + }, + }, + }, + expCtx: w3cTraceContext, + expNoErr: true, + }, + { + name: "eventbridge-event-without-trace-context", + events: []interface{}{ + events.EventBridgeEvent{ + Detail: struct { + TraceContext map[string]string `json:"_datadog"` + }{ + TraceContext: map[string]string{}, + }, + }, + }, + expCtx: nil, + expNoErr: false, + }, + // events.APIGatewayProxyRequest: { name: "APIGatewayProxyRequest", @@ -510,6 +577,34 @@ func TestExtractorExtract(t *testing.T) { expCtx: nil, expNoErr: false, }, + + // Step Functions event + { + name: "step-function-event with no input", + events: []interface{}{ + events.StepFunctionPayload{ + Execution: struct { + ID string + }{ + ID: "arn:aws:states:us-east-1:425362996713:execution:agocsTestSF:aa6c9316-713a-41d4-9c30-61131716744f", + }, + State: struct { + Name string + EnteredTime string + }{ + Name: "agocsTest1", + EnteredTime: "2024-07-30T20:46:20.824Z", + }, + }, + }, + expCtx: &TraceContext{ + TraceID: 5377636026938777059, + TraceIDUpper64Hex: "6fb5c3a05c73dbfe", + ParentID: 8947638978974359093, + SamplingPriority: 1, + 
}, + expNoErr: true, + }, } for _, tc := range testcases { @@ -990,3 +1085,35 @@ func TestConvertStrToUint64(t *testing.T) { }) } } + +func TestEventBridgeCarrierWithW3CHeaders(t *testing.T) { + const ( + testResourceName = "test-event-bus" + testStartTime = "1632150183123456789" + ) + + event := events.EventBridgeEvent{ + Detail: struct { + TraceContext map[string]string `json:"_datadog"` + }{ + TraceContext: map[string]string{ + "traceparent": headersMapW3C["traceparent"], + "tracestate": headersMapW3C["tracestate"], + "x-datadog-resource-name": testResourceName, + "x-datadog-start-time": testStartTime, + }, + }, + } + + carrier, err := eventBridgeCarrier(event) + assert.NoError(t, err) + assert.NotNil(t, carrier) + + textMapCarrier, ok := carrier.(tracer.TextMapCarrier) + assert.True(t, ok) + + assert.Equal(t, headersMapW3C["traceparent"], textMapCarrier["traceparent"]) + assert.Equal(t, headersMapW3C["tracestate"], textMapCarrier["tracestate"]) + assert.Equal(t, testResourceName, textMapCarrier["x-datadog-resource-name"]) + assert.Equal(t, testStartTime, textMapCarrier["x-datadog-start-time"]) +} diff --git a/pkg/serverless/trace/testdata/event_samples/eventbridge-custom.json b/pkg/serverless/trace/testdata/event_samples/eventbridge-custom.json index 96c11000600ae..f068b8efad806 100644 --- a/pkg/serverless/trace/testdata/event_samples/eventbridge-custom.json +++ b/pkg/serverless/trace/testdata/event_samples/eventbridge-custom.json @@ -12,7 +12,9 @@ "_datadog": { "x-datadog-trace-id": "12345", "x-datadog-parent-id": "67890", - "x-datadog-sampling-priority": "2" + "x-datadog-sampling-priority": "2", + "x-datadog-resource-name": "testBus", + "x-datadog-start-time": "100" } } } diff --git a/pkg/serverless/trace/testdata/event_samples/eventbridge-no-bus.json b/pkg/serverless/trace/testdata/event_samples/eventbridge-no-bus.json new file mode 100644 index 0000000000000..b0d7ede4e571b --- /dev/null +++ b/pkg/serverless/trace/testdata/event_samples/eventbridge-no-bus.json @@ -0,0 +1,19 @@ +{ + "version": "0", + "id": "fd03f394-e769-eff5-08a8-53c228933591", + "detail-type": "testdetail", + "source": "eventbridge.custom.event.sender", + "account": "425362996713", + "time": "2021-11-04T01:37:45Z", + "region": "sa-east-1", + "resources": [], + "detail": { + "foo": "bar", + "_datadog": { + "x-datadog-trace-id": "12345", + "x-datadog-parent-id": "67890", + "x-datadog-sampling-priority": "2", + "x-datadog-start-time": "100" + } + } +} diff --git a/pkg/serverless/trace/testdata/event_samples/eventbridge-no-timestamp.json b/pkg/serverless/trace/testdata/event_samples/eventbridge-no-timestamp.json new file mode 100644 index 0000000000000..89222beabd4b3 --- /dev/null +++ b/pkg/serverless/trace/testdata/event_samples/eventbridge-no-timestamp.json @@ -0,0 +1,19 @@ +{ + "version": "0", + "id": "fd03f394-e769-eff5-08a8-53c228933591", + "detail-type": "testdetail", + "source": "eventbridge.custom.event.sender", + "account": "425362996713", + "time": "2024-09-16T16:58:45Z", + "region": "sa-east-1", + "resources": [], + "detail": { + "foo": "bar", + "_datadog": { + "x-datadog-trace-id": "12345", + "x-datadog-parent-id": "67890", + "x-datadog-sampling-priority": "2", + "x-datadog-resource-name": "testBus" + } + } +} diff --git a/pkg/serverless/trace/testdata/event_samples/eventbridgesns.json b/pkg/serverless/trace/testdata/event_samples/eventbridgesns.json new file mode 100644 index 0000000000000..0e9fb853b3dfb --- /dev/null +++ b/pkg/serverless/trace/testdata/event_samples/eventbridgesns.json @@ -0,0 +1,17 @@ +{ 
+ "Records":[ + { + "SNS":{ + "MessageID":"12345678-90abc-def-1234-567890abcdef", + "Type":"Notification", + "TopicArn":"arn:aws:sns:us-east-1:123456789012:test-notifier", + "MessageAttributes":{ + + }, + "Timestamp":"2024-09-16T19:44:01.713Z", + "Subject":"", + "Message":"{\"version\":\"0\",\"id\":\"12345678-90abc-def-1234-567890abcdef\",\"detail-type\":\"TestDetail\",\"source\":\"com.test.source\",\"account\":\"12345667890\",\"time\":\"2024-09-16T19:44:01Z\",\"region\":\"us-east-1\",\"resources\":[],\"detail\":{\"foo\":\"bar\",\"_datadog\":{\"x-datadog-trace-id\":\"12345\",\"x-datadog-parent-id\":\"67890\",\"x-datadog-sampling-priority\":\"1\",\"x-datadog-start-time\":\"1726515840997\",\"x-datadog-resource-name\":\"test-bus\",\"x-datadog-tags\":\"_dd.p.dm=-1,_dd.p.tid=123567890\"}}}" + } + } + ] +} diff --git a/pkg/serverless/trace/testdata/event_samples/eventbridgesqs.json b/pkg/serverless/trace/testdata/event_samples/eventbridgesqs.json new file mode 100644 index 0000000000000..45ca19b93ee38 --- /dev/null +++ b/pkg/serverless/trace/testdata/event_samples/eventbridgesqs.json @@ -0,0 +1,19 @@ +{ + "Records":[ + { + "ReceiptHandle":"AQEB9RCmPUwKay0Fttcd7JEN1XPUwBq4ixSPWCQ5ne4x2r4SOQmyBy45h08wPSLe3ZXXXqjpAawK0J91O6wu/DsBHFZnYL2CIBbYhnZsYkwiO8XwsDQrf1ZSTTFH7eGwHuVQ2BsX7O+a9m+5THfXl6e7kBhfNTkATxstbr2iVRObgkvmiI9DdoBCsWBHqn8Z48j28ExS4Ov3i1olku6DcTnq6WxBGPMIYz3qX2LEnDFGNwnL6Ldzi/R4C7BJ8qMvsQeXFFAfGuWNjQsO6PKDhKo1eAEzozlcQd5sDtflIeMsNhfi3LusSPudncQ+zS9qUOWKgezKZqVBLbea4Mt1XIpe/e4WL2DVFfU5IE4cjsxrGEF9v2hcGelCrRexEqy+BVi0NLdwyO6R5L1GfU/1NJUVEE9o8wEqtC+0lrwG8xC6eS0=", + "Body":"{\"version\":\"0\",\"id\":\"103310e6-f267-750d-8cdd-6bee88ad2c9c\",\"detail-type\":\"TestDetail\",\"source\":\"com.test.source\",\"account\":\"12345\",\"time\":\"2024-09-16T19:00:27Z\",\"region\":\"us-east-1\",\"resources\":[],\"detail\":{\"foo\":\"bar\",\"_datadog\":{\"x-datadog-trace-id\":\"12345\",\"x-datadog-parent-id\":\"67890\",\"x-datadog-sampling-priority\":\"1\",\"x-datadog-start-time\":\"1726513226645\",\"x-datadog-resource-name\":\"test-bus\",\"x-datadog-tags\":\"_dd.p.dm=-1,_dd.p.tid=1234567800000000\"}}}", + "Attributes":{ + "ApproximateReceiveCount":"1", + "SentTimestamp":"1726513227336", + "SenderId":"AIDAIOA2GYWSHW4E2VXIO", + "ApproximateFirstReceiveTimestamp":"1726513227350" + }, + "MessageAttributes":{ + + }, + "eventSource": "aws:sqs", + "EventSourceARN":"arn:aws:sqs:us-east-1:123456789012:test-queue" + } + ] +} diff --git a/pkg/serverless/trace/testdata/event_samples/stepfunction.json b/pkg/serverless/trace/testdata/event_samples/stepfunction.json new file mode 100644 index 0000000000000..74e4c010aac2d --- /dev/null +++ b/pkg/serverless/trace/testdata/event_samples/stepfunction.json @@ -0,0 +1,21 @@ +{ + "Payload": { + "Execution": { + "Id": "arn:aws:states:us-east-1:425362996713:execution:agocsTestSF:bc9f281c-3daa-4e5a-9a60-471a3810bf44", + "Input": {}, + "StartTime": "2024-07-30T19:55:52.976Z", + "Name": "bc9f281c-3daa-4e5a-9a60-471a3810bf44", + "RoleArn": "arn:aws:iam::425362996713:role/test-serverless-stepfunctions-dev-AgocsTestSFRole-tRkeFXScjyk4", + "RedriveCount": 0 + }, + "StateMachine": { + "Id": "arn:aws:states:us-east-1:425362996713:stateMachine:agocsTestSF", + "Name": "agocsTestSF" + }, + "State": { + "Name": "agocsTest1", + "EnteredTime": "2024-07-30T19:55:53.018Z", + "RetryCount": 0 + } + } +} diff --git a/pkg/serverless/trace/trace.go b/pkg/serverless/trace/trace.go index 1c7d901e04619..c941f113069a3 100644 --- a/pkg/serverless/trace/trace.go +++ b/pkg/serverless/trace/trace.go @@ -11,19 
+11,20 @@ import ( "os" "strings" + "github.com/DataDog/datadog-go/v5/statsd" + "github.com/DataDog/datadog-agent/cmd/serverless-init/cloudservice" compcorecfg "github.com/DataDog/datadog-agent/comp/core/config" zstd "github.com/DataDog/datadog-agent/comp/trace/compression/impl-zstd" comptracecfg "github.com/DataDog/datadog-agent/comp/trace/config" - ddConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/agent" "github.com/DataDog/datadog-agent/pkg/trace/api" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-go/v5/statsd" ) // ServerlessTraceAgent represents a trace agent in a serverless context @@ -96,7 +97,7 @@ func StartServerlessTraceAgent(enabled bool, loadConfig Load, lambdaSpanChan cha // Set the serverless config option which will be used to determine if // hostname should be resolved. Skipping hostname resolution saves >1s // in load time between gRPC calls and agent commands. - ddConfig.Datadog().Set("serverless.enabled", true, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("serverless.enabled", true, model.SourceAgentRuntime) tc, confErr := loadConfig.Load() if confErr != nil { diff --git a/pkg/serverless/trigger/events.go b/pkg/serverless/trigger/events.go index 95dca9ad76348..eea104d487de1 100644 --- a/pkg/serverless/trigger/events.go +++ b/pkg/serverless/trigger/events.go @@ -76,6 +76,12 @@ const ( // LambdaFunctionURLEvent describes an event from an HTTP lambda function URL invocation LambdaFunctionURLEvent + + // StepFunctionEvent describes an event with a Step Function execution context + StepFunctionEvent + + // LegacyStepFunctionEvent describes an event with a Legacy Lambda Step Function execution context + LegacyStepFunctionEvent ) // eventParseFunc defines the signature of AWS event parsing functions @@ -110,6 +116,8 @@ var ( {isAppSyncResolverEvent, AppSyncResolverEvent}, {isEventBridgeEvent, EventBridgeEvent}, {isLambdaFunctionURLEvent, LambdaFunctionURLEvent}, + {isStepFunctionEvent, StepFunctionEvent}, + {isLegacyStepFunctionEvent, LegacyStepFunctionEvent}, // Ultimately check this is a Kong API Gateway event as a last resort. 
// This is because Kong API Gateway events are a subset of API Gateway events // as of https://github.com/Kong/kong/blob/348c980/kong/plugins/aws-lambda/request-util.lua#L248-L260 @@ -270,6 +278,32 @@ func isLambdaFunctionURLEvent(event map[string]any) bool { return strings.Contains(lambdaURL, "lambda-url") } +func isLegacyStepFunctionEvent(event map[string]any) bool { + execId := json.GetNestedValue(event, "payload", "execution", "id") + if execId == nil { + return false + } + stateName := json.GetNestedValue(event, "payload", "state", "name") + if stateName == nil { + return false + } + stateEnteredTime := json.GetNestedValue(event, "payload", "state", "enteredtime") + return stateEnteredTime != nil +} + +func isStepFunctionEvent(event map[string]any) bool { + execId := json.GetNestedValue(event, "execution", "id") + if execId == nil { + return false + } + stateName := json.GetNestedValue(event, "state", "name") + if stateName == nil { + return false + } + stateEnteredTime := json.GetNestedValue(event, "state", "enteredtime") + return stateEnteredTime != nil +} + func eventRecordsKeyExists(event map[string]any, key string) bool { records, ok := json.GetNestedValue(event, "records").([]interface{}) if !ok { @@ -336,6 +370,8 @@ func (et AWSEventType) String() string { return "EventBridgeEvent" case LambdaFunctionURLEvent: return "LambdaFunctionURLEvent" + case StepFunctionEvent: + return "StepFunctionEvent" default: return fmt.Sprintf("EventType(%d)", et) } diff --git a/pkg/serverless/trigger/events/events.go b/pkg/serverless/trigger/events/events.go index 23c0e6566d853..03e4760b82044 100644 --- a/pkg/serverless/trigger/events/events.go +++ b/pkg/serverless/trigger/events/events.go @@ -230,7 +230,10 @@ type KinesisRecord struct { type EventBridgeEvent struct { DetailType string `json:"detail-type"` Source string - StartTime string + Time time.Time + Detail struct { + TraceContext map[string]string `json:"_datadog"` + } } // S3Event mirrors events.S3Event type, removing unused fields. @@ -283,6 +286,7 @@ type SNSEntity struct { MessageAttributes map[string]interface{} Timestamp time.Time Subject string + Message string } // SQSEvent mirrors events.SQSEvent type, removing unused fields. @@ -334,3 +338,21 @@ type LambdaFunctionURLRequestContextHTTPDescription struct { SourceIP string UserAgent string } + +// StepFunctionEvent is the event you get when you instrument a legacy Stepfunction Lambda:Invoke task state +// as recommended by https://docs.datadoghq.com/serverless/step_functions/installation?tab=custom +// This isn't an "official" event, as a default StepFunction invocation will just contain {} +type StepFunctionEvent struct { + Payload StepFunctionPayload +} + +// StepFunctionPayload is the payload of a StepFunctionEvent. It's also a non-legacy version of the `StepFunctionEvent`. 
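To make the two shapes concrete before the struct definition below: a legacy event wraps the context under "Payload" (cf. testData/legacystepfunction.json), while the non-legacy event is the bare payload. A self-contained sketch using local mirrors of the types defined here; the sample values come from the legacy test fixture in this diff.

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the StepFunctionPayload / StepFunctionEvent types defined below.
type stepFunctionPayload struct {
	Execution struct{ ID string }
	State     struct {
		Name        string
		EnteredTime string
	}
}

type stepFunctionEvent struct {
	Payload stepFunctionPayload
}

func main() {
	// Legacy shape: the context is wrapped under "Payload".
	legacy := `{"Payload":{"Execution":{"Id":"arn:aws:states:sa-east-1:425362996713:execution:invokeJavaLambda:c0ca8d0f-a3af-4c42-bfd4-b3b100e77f01"},"State":{"Name":"invoker","EnteredTime":"2024-08-29T21:48:55.275Z"}}}`

	var ev stepFunctionEvent
	if err := json.Unmarshal([]byte(legacy), &ev); err != nil {
		panic(err)
	}
	// The non-legacy shape is the same object without the "Payload" wrapper,
	// so it unmarshals directly into stepFunctionPayload instead.
	fmt.Println(ev.Payload.Execution.ID, ev.Payload.State.Name, ev.Payload.State.EnteredTime)
}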
+type StepFunctionPayload struct { + Execution struct { + ID string + } + State struct { + Name string + EnteredTime string + } +} diff --git a/pkg/serverless/trigger/events_test.go b/pkg/serverless/trigger/events_test.go index 9a9130f2567d6..f5462feafc4fb 100644 --- a/pkg/serverless/trigger/events_test.go +++ b/pkg/serverless/trigger/events_test.go @@ -37,6 +37,7 @@ func TestEventPayloadParsing(t *testing.T) { "sns.json": isSNSEvent, "sqs.json": isSQSEvent, "lambdaurl.json": isLambdaFunctionURLEvent, + "stepfunction.json": isStepFunctionEvent, } for testFile, testFunc := range testCases { file, err := os.Open(fmt.Sprintf("%v/%v", testDir, testFile)) @@ -115,6 +116,8 @@ func TestGetEventType(t *testing.T) { "sns.json": SNSEvent, "sqs.json": SQSEvent, "lambdaurl.json": LambdaFunctionURLEvent, + "stepfunction.json": StepFunctionEvent, + "legacystepfunction.json": LegacyStepFunctionEvent, } for testFile, expectedEventType := range testCases { diff --git a/pkg/serverless/trigger/testData/legacystepfunction.json b/pkg/serverless/trigger/testData/legacystepfunction.json new file mode 100644 index 0000000000000..70fe2e5c995d3 --- /dev/null +++ b/pkg/serverless/trigger/testData/legacystepfunction.json @@ -0,0 +1,22 @@ +{ + "FunctionName": "${lambdaArn}", + "Payload": { + "Execution": { + "Id": "arn:aws:states:sa-east-1:425362996713:execution:invokeJavaLambda:c0ca8d0f-a3af-4c42-bfd4-b3b100e77f01", + "Input": {}, + "StartTime": "2024-08-29T21:48:55.187Z", + "Name": "c0ca8d0f-a3af-4c42-bfd4-b3b100e77f01", + "RoleArn": "arn:aws:iam::425362996713:role/new-extension-test-java-dev-InvokeJavaLambdaRole-LtJmnJReIOTS", + "RedriveCount": 0 + }, + "StateMachine": { + "Id": "arn:aws:states:sa-east-1:425362996713:stateMachine:invokeJavaLambda", + "Name": "invokeJavaLambda" + }, + "State": { + "Name": "invoker", + "EnteredTime": "2024-08-29T21:48:55.275Z", + "RetryCount": 0 + } + } +} diff --git a/pkg/serverless/trigger/testData/stepfunction.json b/pkg/serverless/trigger/testData/stepfunction.json new file mode 100644 index 0000000000000..5cdd11d7f405e --- /dev/null +++ b/pkg/serverless/trigger/testData/stepfunction.json @@ -0,0 +1,19 @@ +{ + "Execution": { + "Id": "arn:aws:states:sa-east-1:425362996713:execution:invokeJavaLambda:c0ca8d0f-a3af-4c42-bfd4-b3b100e77f01", + "Input": {}, + "StartTime": "2024-08-29T21:48:55.187Z", + "Name": "c0ca8d0f-a3af-4c42-bfd4-b3b100e77f01", + "RoleArn": "arn:aws:iam::425362996713:role/new-extension-test-java-dev-InvokeJavaLambdaRole-LtJmnJReIOTS", + "RedriveCount": 0 + }, + "StateMachine": { + "Id": "arn:aws:states:sa-east-1:425362996713:stateMachine:invokeJavaLambda", + "Name": "invokeJavaLambda" + }, + "State": { + "Name": "invoker", + "EnteredTime": "2024-08-29T21:48:55.275Z", + "RetryCount": 0 + } +} diff --git a/pkg/snmp/snmp.go b/pkg/snmp/snmp.go index 6ee423b23df50..6913ae4a866a7 100644 --- a/pkg/snmp/snmp.go +++ b/pkg/snmp/snmp.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/viper" "github.com/gosnmp/gosnmp" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/snmp/gosnmplib" "github.com/DataDog/datadog-agent/pkg/snmp/snmpintegration" @@ -119,13 +119,13 @@ func NewListenerConfig() (ListenerConfig, error) { snmpConfig.CollectDeviceMetadata = true snmpConfig.CollectTopology = true - if coreconfig.Datadog().IsSet("network_devices.autodiscovery") { - err := coreconfig.Datadog().UnmarshalKey("network_devices.autodiscovery", &snmpConfig, opt) + if 
pkgconfigsetup.Datadog().IsSet("network_devices.autodiscovery") { + err := pkgconfigsetup.Datadog().UnmarshalKey("network_devices.autodiscovery", &snmpConfig, opt) if err != nil { return snmpConfig, err } - } else if coreconfig.Datadog().IsSet("snmp_listener") { - err := coreconfig.Datadog().UnmarshalKey("snmp_listener", &snmpConfig, opt) + } else if pkgconfigsetup.Datadog().IsSet("snmp_listener") { + err := pkgconfigsetup.Datadog().UnmarshalKey("snmp_listener", &snmpConfig, opt) if err != nil { return snmpConfig, err } @@ -182,7 +182,7 @@ func NewListenerConfig() (ListenerConfig, error) { config.PingConfig.Timeout = firstNonNil(config.PingConfig.Timeout, snmpConfig.PingConfig.Timeout) config.PingConfig.Count = firstNonNil(config.PingConfig.Count, snmpConfig.PingConfig.Count) - config.Namespace = firstNonEmpty(config.Namespace, snmpConfig.Namespace, coreconfig.Datadog().GetString("network_devices.namespace")) + config.Namespace = firstNonEmpty(config.Namespace, snmpConfig.Namespace, pkgconfigsetup.Datadog().GetString("network_devices.namespace")) config.Community = firstNonEmpty(config.Community, config.CommunityLegacy) config.AuthKey = firstNonEmpty(config.AuthKey, config.AuthKeyLegacy) config.AuthProtocol = firstNonEmpty(config.AuthProtocol, config.AuthProtocolLegacy) diff --git a/pkg/snmp/snmp_test.go b/pkg/snmp/snmp_test.go index 5230eb3c3b608..bffe8bef122b6 100644 --- a/pkg/snmp/snmp_test.go +++ b/pkg/snmp/snmp_test.go @@ -11,7 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/snmp/snmpintegration" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/gosnmp/gosnmp" "github.com/stretchr/testify/assert" @@ -89,10 +89,10 @@ func TestBuildSNMPParams(t *testing.T) { } func TestNewListenerConfig(t *testing.T) { - config.Datadog().SetConfigType("yaml") + pkgconfigsetup.Datadog().SetConfigType("yaml") // default collect_device_metadata should be true - err := config.Datadog().ReadConfig(strings.NewReader(` + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` snmp_listener: configs: - network: 127.0.0.1/30 @@ -114,7 +114,7 @@ snmp_listener: assert.Equal(t, false, conf.Configs[2].CollectDeviceMetadata) // collect_device_metadata: false - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` snmp_listener: collect_device_metadata: false configs: @@ -137,7 +137,7 @@ snmp_listener: assert.Equal(t, false, conf.Configs[2].CollectDeviceMetadata) // collect_device_metadata: true - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` snmp_listener: collect_device_metadata: true configs: @@ -161,10 +161,10 @@ snmp_listener: } func TestNewNetworkDevicesListenerConfig(t *testing.T) { - config.Datadog().SetConfigType("yaml") + pkgconfigsetup.Datadog().SetConfigType("yaml") // default collect_device_metadata should be true - err := config.Datadog().ReadConfig(strings.NewReader(` + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: configs: @@ -187,7 +187,7 @@ network_devices: assert.Equal(t, false, conf.Configs[2].CollectDeviceMetadata) // collect_device_metadata: false - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: collect_device_metadata: false @@ -211,7 +211,7 @@ network_devices: assert.Equal(t, false, 
conf.Configs[2].CollectDeviceMetadata) // collect_device_metadata: true - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: collect_device_metadata: true @@ -236,10 +236,10 @@ network_devices: } func TestBothListenersConfig(t *testing.T) { - config.Datadog().SetConfigType("yaml") + pkgconfigsetup.Datadog().SetConfigType("yaml") // check that network_devices config override the snmp_listener config - err := config.Datadog().ReadConfig(strings.NewReader(` + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` snmp_listener: collect_device_metadata: true configs: @@ -272,7 +272,7 @@ network_devices: assert.Equal(t, true, conf.Configs[2].CollectDeviceMetadata) // incorrect snmp_listener config and correct network_devices config - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` snmp_listener: configs: - foo: bar @@ -299,7 +299,7 @@ network_devices: assert.Equal(t, true, conf.Configs[2].CollectDeviceMetadata) // incorrect snmp_listener config and correct network_devices config - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` snmp_listener: configs: - network: 127.0.0.4/30 @@ -318,8 +318,8 @@ network_devices: } func Test_LoaderConfig(t *testing.T) { - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: configs: @@ -338,7 +338,7 @@ network_devices: assert.Equal(t, "core", conf.Configs[1].Loader) assert.Equal(t, "python", conf.Configs[2].Loader) - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: loader: core @@ -361,8 +361,8 @@ network_devices: } func Test_MinCollectionInterval(t *testing.T) { - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: min_collection_interval: 60 @@ -381,8 +381,8 @@ network_devices: } func Test_Configs(t *testing.T) { - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: workers: 10 @@ -438,7 +438,7 @@ network_devices: ///////////////// // legacy configs ///////////////// - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: allowed_failures: 15 @@ -468,8 +468,8 @@ network_devices: func Test_NamespaceConfig(t *testing.T) { // Default Namespace - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: configs: @@ -483,8 +483,8 @@ network_devices: assert.Equal(t, "default", networkConf.Namespace) // Custom Namespace in network_devices - config.Datadog().SetConfigType("yaml") - err = config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err = 
pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: namespace: ponyo autodiscovery: @@ -499,8 +499,8 @@ network_devices: assert.Equal(t, "ponyo", networkConf.Namespace) // Custom Namespace in snmp_listener - config.Datadog().SetConfigType("yaml") - err = config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: namespace: totoro @@ -528,8 +528,8 @@ func TestFirstNonEmpty(t *testing.T) { } func Test_UseDeviceIDAsHostname(t *testing.T) { - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: use_device_id_as_hostname: true @@ -548,8 +548,8 @@ network_devices: } func Test_CollectTopology_withRootCollectTopologyFalse(t *testing.T) { - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: collect_topology: false @@ -571,8 +571,8 @@ network_devices: } func Test_CollectTopology_withRootCollectTopologyTrue(t *testing.T) { - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: collect_topology: true @@ -594,8 +594,8 @@ network_devices: } func Test_CollectTopology_withRootCollectTopologyUnset(t *testing.T) { - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: configs: diff --git a/pkg/status/endpoints/status.go b/pkg/status/endpoints/status.go index b613cb04eb7d4..11007c87414d6 100644 --- a/pkg/status/endpoints/status.go +++ b/pkg/status/endpoints/status.go @@ -11,13 +11,13 @@ import ( "io" "github.com/DataDog/datadog-agent/comp/core/status" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" ) // PopulateStatus populates the status stats func PopulateStatus(stats map[string]interface{}) { - endpoints, err := utils.GetMultipleEndpoints(config.Datadog()) + endpoints, err := utils.GetMultipleEndpoints(pkgconfigsetup.Datadog()) if err != nil { stats["endpointsInfos"] = nil return diff --git a/pkg/tagger/types/types.go b/pkg/tagger/types/types.go index 51fc672461c0b..07e9f0f1d1698 100644 --- a/pkg/tagger/types/types.go +++ b/pkg/tagger/types/types.go @@ -21,10 +21,10 @@ const ( // OriginInfo contains the Origin Detection information. type OriginInfo struct { - FromUDS string // FromUDS is the origin resolved using Unix Domain Socket. - FromTag string // FromTag is the origin resolved from tags. - FromMsg string // FromMsg is the origin resolved from the message. - ExternalData string // ExternalData is the external data list. - Cardinality string // Cardinality is the cardinality of the resolved origin. - ProductOrigin ProductOrigin // ProductOrigin is the product that sent the origin information. 
+ ContainerIDFromSocket string // ContainerIDFromSocket is the origin resolved using Unix Domain Socket. + PodUID string // PodUID is the origin resolved from the Kubernetes Pod UID. + ContainerID string // ContainerID is the origin resolved from the container ID. + ExternalData string // ExternalData is the external data list. + Cardinality string // Cardinality is the cardinality of the resolved origin. + ProductOrigin ProductOrigin // ProductOrigin is the product that sent the origin information. } diff --git a/pkg/trace/api/api.go b/pkg/trace/api/api.go index 816a14eef938d..7062efc03e307 100644 --- a/pkg/trace/api/api.go +++ b/pkg/trace/api/api.go @@ -279,17 +279,18 @@ func (r *HTTPReceiver) Start() { if _, err := os.Stat(filepath.Dir(path)); !os.IsNotExist(err) { ln, err := r.listenUnix(path) if err != nil { + log.Errorf("Error creating UDS listener: %v", err) r.telemetryCollector.SendStartupError(telemetry.CantStartUdsServer, err) - killProcess("Error creating UDS listener: %v", err) + } else { + go func() { + defer watchdog.LogOnPanic(r.statsd) + if err := r.server.Serve(ln); err != nil && err != http.ErrServerClosed { + log.Errorf("Could not start UDS server: %v. UDS receiver disabled.", err) + r.telemetryCollector.SendStartupError(telemetry.CantStartUdsServer, err) + } + }() + log.Infof("Listening for traces at unix://%s", path) } - go func() { - defer watchdog.LogOnPanic(r.statsd) - if err := r.server.Serve(ln); err != nil && err != http.ErrServerClosed { - log.Errorf("Could not start UDS server: %v. UDS receiver disabled.", err) - r.telemetryCollector.SendStartupError(telemetry.CantStartUdsServer, err) - } - }() - log.Infof("Listening for traces at unix://%s", path) } else { log.Errorf("Could not start UDS listener: socket directory does not exist: %s", path) } diff --git a/pkg/trace/api/api_nix_test.go b/pkg/trace/api/api_nix_test.go index 8c871ad875f9d..fbefae776493e 100644 --- a/pkg/trace/api/api_nix_test.go +++ b/pkg/trace/api/api_nix_test.go @@ -13,6 +13,7 @@ import ( "fmt" "net" "net/http" + "os" "path/filepath" "testing" "time" @@ -83,6 +84,21 @@ func TestUDS(t *testing.T) { t.Fatalf("expected http.StatusOK, got response: %#v", resp) } }) + + t.Run("uds_permission_err", func(t *testing.T) { + dir := t.TempDir() + err := os.Chmod(dir, 0444) // read-only + assert.NoError(t, err) + + conf := config.New() + conf.Endpoints[0].APIKey = "apikey_2" + conf.ReceiverSocket = filepath.Join(dir, "apm.socket") + + r := newTestReceiverFromConfig(conf) + // should not crash + r.Start() + r.Stop() + }) } func TestHTTPReceiverStart(t *testing.T) { diff --git a/pkg/trace/api/info.go b/pkg/trace/api/info.go index 4ea4a5e5bb509..79d1d55d7d5ee 100644 --- a/pkg/trace/api/info.go +++ b/pkg/trace/api/info.go @@ -10,8 +10,10 @@ import ( "encoding/json" "fmt" "net/http" + "slices" "github.com/DataDog/datadog-agent/pkg/obfuscate" + "github.com/DataDog/datadog-agent/pkg/trace/stats" ) // makeInfoHandler returns a new handler for handling the discovery endpoint. @@ -61,6 +63,17 @@ func (r *HTTPReceiver) makeInfoHandler() (hash string, handler http.HandlerFunc) oconf.Redis = o.Redis oconf.Memcached = o.Memcached } + + // We check that endpoints contains stats, even though we know this version of the + // agent supports it. It's conceivable that the stats endpoint could be disabled at some point + // so this is defensive against that case. 
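NOTE (illustrative aside, not part of the patch): the canDropP0 computation that follows gates the advertised client_drop_p0s flag on both the sampler configuration and the endpoint list, as the comment above explains. Below is a minimal, self-contained sketch of the same check; probabilisticSamplerEnabled and endpoints are hypothetical stand-ins for r.conf.ProbabilisticSamplerEnabled and the handler's "all" slice.

package main

import (
	"fmt"
	"slices"
)

func main() {
	// Stand-ins for r.conf.ProbabilisticSamplerEnabled and the registered endpoints.
	probabilisticSamplerEnabled := false
	endpoints := []string{"/v0.4/traces", "/v0.7/traces", "/v0.6/stats"}

	// Clients may only drop P0 traces locally when the agent serves the stats
	// endpoint and probabilistic sampling is disabled.
	canDropP0 := !probabilisticSamplerEnabled && slices.Contains(endpoints, "/v0.6/stats")
	fmt.Println("client_drop_p0s:", canDropP0) // client_drop_p0s: true
}
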
+ canDropP0 := !r.conf.ProbabilisticSamplerEnabled && slices.Contains(all, "/v0.6/stats") + + var spanKindsStatsComputed []string + if r.conf.ComputeStatsBySpanKind { + spanKindsStatsComputed = stats.KindsComputed + } + txt, err := json.MarshalIndent(struct { Version string `json:"version"` GitCommit string `json:"git_commit"` @@ -72,15 +85,17 @@ func (r *HTTPReceiver) makeInfoHandler() (hash string, handler http.HandlerFunc) EvpProxyAllowedHeaders []string `json:"evp_proxy_allowed_headers"` Config reducedConfig `json:"config"` PeerTags []string `json:"peer_tags"` + SpanKindsStatsComputed []string `json:"span_kinds_stats_computed"` }{ Version: r.conf.AgentVersion, GitCommit: r.conf.GitCommit, Endpoints: all, FeatureFlags: r.conf.AllFeatures(), - ClientDropP0s: true, + ClientDropP0s: canDropP0, SpanMetaStructs: true, LongRunningSpans: true, EvpProxyAllowedHeaders: EvpProxyAllowedHeaders, + SpanKindsStatsComputed: spanKindsStatsComputed, Config: reducedConfig{ DefaultEnv: r.conf.DefaultEnv, TargetTPS: r.conf.TargetTPS, diff --git a/pkg/trace/api/info_test.go b/pkg/trace/api/info_test.go index 0d82e3fd41b2c..d8fc856b6a6be 100644 --- a/pkg/trace/api/info_test.go +++ b/pkg/trace/api/info_test.go @@ -302,6 +302,7 @@ func TestInfoHandler(t *testing.T) { "long_running_spans": nil, "evp_proxy_allowed_headers": nil, "peer_tags": nil, + "span_kinds_stats_computed": nil, "config": map[string]interface{}{ "default_env": nil, "target_tps": nil, diff --git a/pkg/trace/api/otlp.go b/pkg/trace/api/otlp.go index fb1a9d20b58ee..1f645164ba6b6 100644 --- a/pkg/trace/api/otlp.go +++ b/pkg/trace/api/otlp.go @@ -207,7 +207,8 @@ func (o *OTLPReceiver) ReceiveResourceSpans(ctx context.Context, rspans ptrace.R if !srcok { hostFromMap(rattr, "_dd.hostname") } - env := rattr[string(semconv.AttributeDeploymentEnvironment)] + // TODO(songy23): use AttributeDeploymentEnvironmentName once collector version upgrade is unblocked + _, env := getFirstFromMap(rattr, "deployment.environment.name", semconv.AttributeDeploymentEnvironment) lang := rattr[string(semconv.AttributeTelemetrySDKLanguage)] if lang == "" { lang = fastHeaderGet(httpHeader, header.Lang) @@ -588,7 +589,8 @@ func (o *OTLPReceiver) convertSpan(rattr map[string]string, lib pcommon.Instrume return true }) if _, ok := span.Meta["env"]; !ok { - if env := span.Meta[string(semconv.AttributeDeploymentEnvironment)]; env != "" { + // TODO(songy23): use AttributeDeploymentEnvironmentName once collector version upgrade is unblocked + if _, env := getFirstFromMap(span.Meta, "deployment.environment.name", semconv.AttributeDeploymentEnvironment); env != "" { setMetaOTLP(span, "env", traceutil.NormalizeTag(env)) } } diff --git a/pkg/trace/api/otlp_test.go b/pkg/trace/api/otlp_test.go index 05a019f9e7e67..75895a36f99a5 100644 --- a/pkg/trace/api/otlp_test.go +++ b/pkg/trace/api/otlp_test.go @@ -288,6 +288,18 @@ func TestOTLPReceiveResourceSpans(t *testing.T) { require.Equal("depenv", out.Env) }, }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{"deployment.environment.name": "staging"}, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("staging", out.Env) + }, + }, { in: []testutil.OTLPResourceSpan{ { @@ -303,6 +315,21 @@ func TestOTLPReceiveResourceSpans(t *testing.T) { require.Equal("spanenv", out.Env) }, }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + {Attributes: 
map[string]interface{}{"deployment.environment.name": "spanenv2"}}, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("spanenv2", out.Env) + }, + }, { in: []testutil.OTLPResourceSpan{ { diff --git a/pkg/trace/api/telemetry.go b/pkg/trace/api/telemetry.go index 4a1880cb6e570..a45ac4105042f 100644 --- a/pkg/trace/api/telemetry.go +++ b/pkg/trace/api/telemetry.go @@ -36,6 +36,7 @@ const ( awsLambda cloudResourceType = "AWSLambda" awsFargate cloudResourceType = "AWSFargate" cloudRun cloudResourceType = "GCPCloudRun" + cloudFunctions cloudResourceType = "GCPCloudFunctions" azureAppService cloudResourceType = "AzureAppService" azureContainerApp cloudResourceType = "AzureContainerApp" aws cloudProvider = "AWS" @@ -257,8 +258,9 @@ func (f *TelemetryForwarder) setRequestHeader(req *http.Request) { req.Header.Set(header.ContainerID, containerID) } if containerTags != "" { - req.Header.Set("x-datadog-container-tags", containerTags) - log.Debugf("Setting header x-datadog-container-tags=%s for telemetry proxy", containerTags) + ctagsHeader := normalizeHTTPHeader(containerTags) + req.Header.Set("X-Datadog-Container-Tags", ctagsHeader) + log.Debugf("Setting header X-Datadog-Container-Tags=%s for telemetry proxy", ctagsHeader) } if f.conf.InstallSignature.Found { req.Header.Set("DD-Agent-Install-Id", f.conf.InstallSignature.InstallID) @@ -282,6 +284,12 @@ func (f *TelemetryForwarder) setRequestHeader(req *http.Request) { if serviceName, found := f.conf.GlobalTags["service_name"]; found { req.Header.Set(cloudResourceIdentifierHeader, serviceName) } + case "cloudfunction": + req.Header.Set(cloudProviderHeader, string(gcp)) + req.Header.Set(cloudResourceTypeHeader, string(cloudFunctions)) + if serviceName, found := f.conf.GlobalTags["service_name"]; found { + req.Header.Set(cloudResourceIdentifierHeader, serviceName) + } case "appservice": req.Header.Set(cloudProviderHeader, string(azure)) req.Header.Set(cloudResourceTypeHeader, string(azureAppService)) diff --git a/pkg/trace/api/telemetry_test.go b/pkg/trace/api/telemetry_test.go index 5c37737da22ae..6662572cf3b6a 100644 --- a/pkg/trace/api/telemetry_test.go +++ b/pkg/trace/api/telemetry_test.go @@ -84,6 +84,7 @@ func TestTelemetryBasicProxyRequest(t *testing.T) { assert.Equal("AWS", req.Header.Get("DD-Cloud-Provider")) assert.Equal("AWSLambda", req.Header.Get("DD-Cloud-Resource-Type")) assert.Equal("test_ARN", req.Header.Get("DD-Cloud-Resource-Identifier")) + assert.Equal("key:test_value", req.Header.Get("X-Datadog-Container-Tags")) assert.Equal("/path", req.URL.Path) assert.Equal("", req.Header.Get("User-Agent")) assert.Regexp(regexp.MustCompile("trace-agent.*"), req.Header.Get("Via")) @@ -94,7 +95,11 @@ func TestTelemetryBasicProxyRequest(t *testing.T) { cfg := getTestConfig(srv.URL) cfg.GlobalTags[functionARNKeyTag] = "test_ARN" + cfg.ContainerTags = func(_ string) ([]string, error) { + return []string{"key:test\nvalue"}, nil + } recv := newTestReceiverFromConfig(cfg) + recv.telemetryForwarder.containerIDProvider = getTestContainerIDProvider() assertSendRequest(t, recv, endpointCalled) } diff --git a/pkg/trace/config/config.go b/pkg/trace/config/config.go index 2ada0ea27e84e..a411e1c1ad656 100644 --- a/pkg/trace/config/config.go +++ b/pkg/trace/config/config.go @@ -117,6 +117,13 @@ type ObfuscationConfig struct { CreditCards obfuscate.CreditCardsConfig `mapstructure:"credit_cards"` } +func obfuscationMode(enabled bool) obfuscate.ObfuscationMode { + if enabled { + return obfuscate.ObfuscateOnly + } + return "" +} + // Export returns an 
obfuscate.Config matching o. func (o *ObfuscationConfig) Export(conf *AgentConfig) obfuscate.Config { return obfuscate.Config{ @@ -126,6 +133,7 @@ func (o *ObfuscationConfig) Export(conf *AgentConfig) obfuscate.Config { KeepSQLAlias: conf.HasFeature("keep_sql_alias"), DollarQuotedFunc: conf.HasFeature("dollar_quoted_func"), Cache: conf.HasFeature("sql_cache"), + ObfuscationMode: obfuscationMode(conf.HasFeature("sqllexer")), }, ES: o.ES, OpenSearch: o.OpenSearch, diff --git a/pkg/trace/go.mod b/pkg/trace/go.mod index a16cdf5d8f6a9..dfa1f86ab5200 100644 --- a/pkg/trace/go.mod +++ b/pkg/trace/go.mod @@ -21,7 +21,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 github.com/DataDog/datadog-go/v5 v5.5.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.14.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 github.com/DataDog/sketches-go v1.4.2 github.com/Microsoft/go-winio v0.6.1 github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 @@ -53,7 +53,7 @@ require ( ) require ( - github.com/DataDog/go-sqllexer v0.0.14 // indirect + github.com/DataDog/go-sqllexer v0.0.15 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/zstd v1.5.5 // indirect github.com/beorn7/perks v1.0.1 // indirect diff --git a/pkg/trace/go.sum b/pkg/trace/go.sum index 0acbab963039c..dae3a88e3d71e 100644 --- a/pkg/trace/go.sum +++ b/pkg/trace/go.sum @@ -2,12 +2,12 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q= -github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.15 h1:rUUu52dP8EQhJLnUw0MIAxZp0BQx2fOTuMztr3vtHUU= +github.com/DataDog/go-sqllexer v0.0.15/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.14.0 h1:10TPqpTlIkmDPFWVIEZ4ZX3rWrCrx3rEoeoAooZr6LM= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.14.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= github.com/DataDog/sketches-go v1.4.2 h1:gppNudE9d19cQ98RYABOetxIhpTCl4m7CnbRZjvVA/o= github.com/DataDog/sketches-go v1.4.2/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= diff --git a/pkg/trace/stats/otel_util.go b/pkg/trace/stats/otel_util.go index 8c9a5a2970daf..1c72b6fa640b8 100644 --- a/pkg/trace/stats/otel_util.go +++ b/pkg/trace/stats/otel_util.go @@ -49,7 +49,8 @@ func OTLPTracesToConcentratorInputs( if _, exists := ignoreResNames[traceutil.GetOTelResource(otelspan, otelres)]; exists { continue } - env := 
traceutil.GetOTelAttrValInResAndSpanAttrs(otelspan, otelres, true, semconv.AttributeDeploymentEnvironment) + // TODO(songy23): use AttributeDeploymentEnvironmentName once collector version upgrade is unblocked + env := traceutil.GetOTelAttrValInResAndSpanAttrs(otelspan, otelres, true, "deployment.environment.name", semconv.AttributeDeploymentEnvironment) hostname := traceutil.GetOTelHostname(otelspan, otelres, conf.OTLPReceiver.AttributesTranslator, conf.Hostname) version := traceutil.GetOTelAttrValInResAndSpanAttrs(otelspan, otelres, true, semconv.AttributeServiceVersion) cid := traceutil.GetOTelAttrValInResAndSpanAttrs(otelspan, otelres, true, semconv.AttributeContainerID, semconv.AttributeK8SPodUID) diff --git a/pkg/trace/stats/otel_util_test.go b/pkg/trace/stats/otel_util_test.go index e2c78f3ef0b73..1422226c8ecc5 100644 --- a/pkg/trace/stats/otel_util_test.go +++ b/pkg/trace/stats/otel_util_test.go @@ -117,6 +117,15 @@ func TestProcessOTLPTraces(t *testing.T) { libname: "spring", expected: createStatsPayload(agentEnv, agentHost, "svc", "op", "http", "client", "res", agentHost, "tracer-env", "", nil, nil, true, false), }, + { + name: "new env convention", + spanName: "spanname2", + rattrs: map[string]string{"service.name": "svc", "deployment.environment.name": "new-env"}, + sattrs: map[string]any{"operation.name": "op", "resource.name": "res"}, + spanKind: ptrace.SpanKindClient, + libname: "spring", + expected: createStatsPayload(agentEnv, agentHost, "svc", "op", "http", "client", "res", agentHost, "new-env", "", nil, nil, true, false), + }, { name: "span operation name from span name with db attribute, peerTagsAggr not enabled", spanName: "spanname3", diff --git a/pkg/trace/stats/oteltest/go.mod b/pkg/trace/stats/oteltest/go.mod index e3466f3cda4f7..c3f51342a5e79 100644 --- a/pkg/trace/stats/oteltest/go.mod +++ b/pkg/trace/stats/oteltest/go.mod @@ -7,7 +7,7 @@ require ( github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 github.com/DataDog/datadog-go/v5 v5.5.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 github.com/google/go-cmp v0.6.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.104.0 @@ -26,7 +26,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect - github.com/DataDog/go-sqllexer v0.0.14 // indirect + github.com/DataDog/go-sqllexer v0.0.15 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/sketches-go v1.4.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect diff --git a/pkg/trace/stats/oteltest/go.sum b/pkg/trace/stats/oteltest/go.sum index a928af42bb225..a08668d36b2f8 100644 --- a/pkg/trace/stats/oteltest/go.sum +++ b/pkg/trace/stats/oteltest/go.sum @@ -1,11 +1,11 @@ github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q= -github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.15 h1:rUUu52dP8EQhJLnUw0MIAxZp0BQx2fOTuMztr3vtHUU= +github.com/DataDog/go-sqllexer v0.0.15/go.mod 
h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 h1:ZI8u3CgdMXpDplrf9/gIr13+/g/tUzUcBMk2ZhXgzLE= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= github.com/DataDog/sketches-go v1.4.2 h1:gppNudE9d19cQ98RYABOetxIhpTCl4m7CnbRZjvVA/o= github.com/DataDog/sketches-go v1.4.2/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= diff --git a/pkg/trace/stats/span_concentrator.go b/pkg/trace/stats/span_concentrator.go index d886b9e7495f7..e48f937f267a5 100644 --- a/pkg/trace/stats/span_concentrator.go +++ b/pkg/trace/stats/span_concentrator.go @@ -6,6 +6,7 @@ package stats import ( + "slices" "strings" "sync" "time" @@ -159,12 +160,16 @@ func (sc *SpanConcentrator) NewStatSpan( // computeStatsForSpanKind returns true if the span.kind value makes the span eligible for stats computation. func computeStatsForSpanKind(kind string) bool { k := strings.ToLower(kind) - switch k { - case "server", "consumer", "client", "producer": - return true - default: - return false - } + return slices.Contains(KindsComputed, k) +} + +// KindsComputed is the list of span kinds that will have stats computed on them +// when computeStatsByKind is enabled in the concentrator. +var KindsComputed = []string{ + "server", + "consumer", + "client", + "producer", } func (sc *SpanConcentrator) addSpan(s *StatSpan, aggKey PayloadAggregationKey, containerID string, containerTags []string, origin string, weight float64) { diff --git a/pkg/trace/writer/trace.go b/pkg/trace/writer/trace.go index 2a7b5ab6b9314..d2879dc7f0c2b 100644 --- a/pkg/trace/writer/trace.go +++ b/pkg/trace/writer/trace.go @@ -294,14 +294,14 @@ func (w *TraceWriter) serialize(pl *pb.AgentPayload) { if err != nil { // it will never happen, unless an invalid compression is chosen; // we know gzip.BestSpeed is valid. - log.Errorf("Failed to initialize gzip writer. No traces can be sent: %v", err) + log.Errorf("Failed to initialize %s writer. 
No traces can be sent: %v", w.compressor.Encoding(), err) return } if _, err := writer.Write(b); err != nil { - log.Errorf("Error gzipping trace payload: %v", err) + log.Errorf("Error %s trace payload: %v", w.compressor.Encoding(), err) } if err := writer.Close(); err != nil { - log.Errorf("Error closing gzip stream when writing trace payload: %v", err) + log.Errorf("Error closing %s stream when writing trace payload: %v", w.compressor.Encoding(), err) } sendPayloads(w.senders, p, w.syncMode) diff --git a/pkg/util/cloudproviders/alibaba/alibaba.go b/pkg/util/cloudproviders/alibaba/alibaba.go index a27ef0ee0580e..3feded8d923e0 100644 --- a/pkg/util/cloudproviders/alibaba/alibaba.go +++ b/pkg/util/cloudproviders/alibaba/alibaba.go @@ -11,7 +11,7 @@ import ( "fmt" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cachedfetch" httputils "github.com/DataDog/datadog-agent/pkg/util/http" ) @@ -36,16 +36,16 @@ func IsRunningOn(ctx context.Context) bool { var instanceIDFetcher = cachedfetch.Fetcher{ Name: "Alibaba InstanceID", Attempt: func(ctx context.Context) (interface{}, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return nil, fmt.Errorf("cloud provider is disabled by configuration") } endpoint := metadataURL + "/latest/meta-data/instance-id" - res, err := httputils.Get(ctx, endpoint, nil, timeout, config.Datadog()) + res, err := httputils.Get(ctx, endpoint, nil, timeout, pkgconfigsetup.Datadog()) if err != nil { return nil, fmt.Errorf("Alibaba HostAliases: unable to query metadata endpoint: %s", err) } - maxLength := config.Datadog().GetInt("metadata_endpoints_max_hostname_size") + maxLength := pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size") if len(res) > maxLength { return nil, fmt.Errorf("%v gave a response with length > to %v", endpoint, maxLength) } diff --git a/pkg/util/cloudproviders/alibaba/alibaba_test.go b/pkg/util/cloudproviders/alibaba/alibaba_test.go index 3e45cdb136c1c..e0ebd5ee0c769 100644 --- a/pkg/util/cloudproviders/alibaba/alibaba_test.go +++ b/pkg/util/cloudproviders/alibaba/alibaba_test.go @@ -15,7 +15,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestGetHostname(t *testing.T) { @@ -51,7 +51,7 @@ func TestGetNTPHosts(t *testing.T) { defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"alibaba"}) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"alibaba"}) actualHosts := GetNTPHosts(ctx) assert.Equal(t, expectedHosts, actualHosts) diff --git a/pkg/util/cloudproviders/azure/azure.go b/pkg/util/cloudproviders/azure/azure.go index 7d993cda9f8e6..30982aa54957d 100644 --- a/pkg/util/cloudproviders/azure/azure.go +++ b/pkg/util/cloudproviders/azure/azure.go @@ -13,7 +13,8 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cachedfetch" "github.com/DataDog/datadog-agent/pkg/util/hostname/validate" httputils "github.com/DataDog/datadog-agent/pkg/util/http" @@ -73,7 +74,7 @@ var vmIDFetcher = 
cachedfetch.Fetcher{ Attempt: func(ctx context.Context) (interface{}, error) { res, err := getResponseWithMaxLength(ctx, metadataURL+"/metadata/instance/compute/vmId?api-version=2017-04-02&format=text", - config.Datadog().GetInt("metadata_endpoints_max_hostname_size")) + pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size")) if err != nil { return nil, fmt.Errorf("Azure HostAliases: unable to query metadata VM ID endpoint: %s", err) } @@ -150,16 +151,16 @@ func getResponseWithMaxLength(ctx context.Context, endpoint string, maxLength in } func getResponse(ctx context.Context, url string) (string, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return "", fmt.Errorf("cloud provider is disabled by configuration") } - return httputils.Get(ctx, url, map[string]string{"Metadata": "true"}, timeout, config.Datadog()) + return httputils.Get(ctx, url, map[string]string{"Metadata": "true"}, timeout, pkgconfigsetup.Datadog()) } // GetHostname returns hostname based on Azure instance metadata. func GetHostname(ctx context.Context) (string, error) { - return getHostnameWithConfig(ctx, config.Datadog()) + return getHostnameWithConfig(ctx, pkgconfigsetup.Datadog()) } var instanceMetaFetcher = cachedfetch.Fetcher{ @@ -196,7 +197,7 @@ func getMetadata(ctx context.Context) (metadata, error) { return metadataInfo, nil } -func getHostnameWithConfig(ctx context.Context, config config.Config) (string, error) { +func getHostnameWithConfig(ctx context.Context, config model.Config) (string, error) { style := config.GetString(hostnameStyleSetting) metadata, err := getMetadata(ctx) if err != nil { diff --git a/pkg/util/cloudproviders/azure/azure_test.go b/pkg/util/cloudproviders/azure/azure_test.go index 0563d553a1cb1..b499115ea3194 100644 --- a/pkg/util/cloudproviders/azure/azure_test.go +++ b/pkg/util/cloudproviders/azure/azure_test.go @@ -17,8 +17,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestGetAlias(t *testing.T) { @@ -137,7 +137,7 @@ func TestGetNTPHosts(t *testing.T) { defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"azure"}) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"azure"}) actualHosts := GetNTPHosts(ctx) assert.Equal(t, expectedHosts, actualHosts) diff --git a/pkg/util/cloudproviders/cloudfoundry/cloudfoundry.go b/pkg/util/cloudproviders/cloudfoundry/cloudfoundry.go index 469dc883bf55f..773aa31b06baa 100644 --- a/pkg/util/cloudproviders/cloudfoundry/cloudfoundry.go +++ b/pkg/util/cloudproviders/cloudfoundry/cloudfoundry.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util" ) @@ -28,7 +28,7 @@ var getFqdn = util.Fqdn // //nolint:revive // TODO(PLINT) Fix revive linter func GetHostAliases(_ context.Context) ([]string, error) { - if !config.Datadog().GetBool("cloud_foundry") { + if !pkgconfigsetup.Datadog().GetBool("cloud_foundry") { log.Debugf("cloud_foundry is not enabled in the conf: no cloudfoudry host alias") return nil, nil } @@ -36,7 +36,7 @@ func GetHostAliases(_ 
context.Context) ([]string, error) { aliases := []string{} // Always send the bosh_id if specified - boshID := config.Datadog().GetString("bosh_id") + boshID := pkgconfigsetup.Datadog().GetString("bosh_id") if boshID != "" { aliases = append(aliases, boshID) } @@ -44,7 +44,7 @@ func GetHostAliases(_ context.Context) ([]string, error) { hostname, _ := os.Hostname() fqdn := getFqdn(hostname) - if config.Datadog().GetBool("cf_os_hostname_aliasing") { + if pkgconfigsetup.Datadog().GetBool("cf_os_hostname_aliasing") { // If set, send os hostname and fqdn as additional aliases aliases = append(aliases, hostname) if fqdn != hostname { diff --git a/pkg/util/cloudproviders/cloudfoundry/garden.go b/pkg/util/cloudproviders/cloudfoundry/garden.go index 382134d4fd03a..aa6e274edd7fd 100644 --- a/pkg/util/cloudproviders/cloudfoundry/garden.go +++ b/pkg/util/cloudproviders/cloudfoundry/garden.go @@ -14,7 +14,7 @@ import ( "code.cloudfoundry.org/garden/client" "code.cloudfoundry.org/garden/client/connection" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/retry" ) @@ -78,8 +78,8 @@ type GardenUtil struct { func GetGardenUtil() (*GardenUtil, error) { globalGardenUtilLock.Lock() defer globalGardenUtilLock.Unlock() - network := config.Datadog().GetString("cloud_foundry_garden.listen_network") - address := config.Datadog().GetString("cloud_foundry_garden.listen_address") + network := pkgconfigsetup.Datadog().GetString("cloud_foundry_garden.listen_network") + address := pkgconfigsetup.Datadog().GetString("cloud_foundry_garden.listen_address") if globalGardenUtil == nil { globalGardenUtil = &GardenUtil{ cli: client.New(connection.New(network, address)), diff --git a/pkg/util/cloudproviders/cloudproviders.go b/pkg/util/cloudproviders/cloudproviders.go index 10c1bd7395351..d9de6ac5fa68c 100644 --- a/pkg/util/cloudproviders/cloudproviders.go +++ b/pkg/util/cloudproviders/cloudproviders.go @@ -12,7 +12,7 @@ import ( "sync" logcomp "github.com/DataDog/datadog-agent/comp/core/log/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/kubelet" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -99,8 +99,13 @@ type cloudProviderAliasesDetector struct { callback func(context.Context) ([]string, error) } +// getValidHostAliases is an alias from pkg config +func getValidHostAliases(ctx context.Context) ([]string, error) { + return pkgconfigsetup.GetValidHostAliases(ctx, pkgconfigsetup.Datadog()) +} + var hostAliasesDetectors = []cloudProviderAliasesDetector{ - {name: "config", callback: config.GetValidHostAliases}, + {name: "config", callback: getValidHostAliases}, {name: alibaba.CloudProviderName, callback: alibaba.GetHostAliases}, {name: ec2.CloudProviderName, callback: ec2.GetHostAliases}, {name: azure.CloudProviderName, callback: azure.GetHostAliases}, diff --git a/pkg/util/cloudproviders/gce/gce.go b/pkg/util/cloudproviders/gce/gce.go index 551fc14e8c875..2aa62af44bd01 100644 --- a/pkg/util/cloudproviders/gce/gce.go +++ b/pkg/util/cloudproviders/gce/gce.go @@ -11,7 +11,7 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cachedfetch" 
"github.com/DataDog/datadog-agent/pkg/util/common" httputils "github.com/DataDog/datadog-agent/pkg/util/http" @@ -38,7 +38,7 @@ var hostnameFetcher = cachedfetch.Fetcher{ Name: "GCP Hostname", Attempt: func(ctx context.Context) (interface{}, error) { hostname, err := getResponseWithMaxLength(ctx, metadataURL+"/instance/hostname", - config.Datadog().GetInt("metadata_endpoints_max_hostname_size")) + pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size")) if err != nil { return "", fmt.Errorf("unable to retrieve hostname from GCE: %s", err) } @@ -76,7 +76,7 @@ var nameFetcher = cachedfetch.Fetcher{ Attempt: func(ctx context.Context) (interface{}, error) { return getResponseWithMaxLength(ctx, metadataURL+"/instance/name", - config.Datadog().GetInt("metadata_endpoints_max_hostname_size")) + pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size")) }, } @@ -85,7 +85,7 @@ var projectIDFetcher = cachedfetch.Fetcher{ Attempt: func(ctx context.Context) (interface{}, error) { projectID, err := getResponseWithMaxLength(ctx, metadataURL+"/project/project-id", - config.Datadog().GetInt("metadata_endpoints_max_hostname_size")) + pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size")) if err != nil { return "", fmt.Errorf("unable to retrieve project ID from GCE: %s", err) } @@ -123,7 +123,7 @@ var clusterNameFetcher = cachedfetch.Fetcher{ Name: "GCP Cluster Name", Attempt: func(ctx context.Context) (interface{}, error) { clusterName, err := getResponseWithMaxLength(ctx, metadataURL+"/instance/attributes/cluster-name", - config.Datadog().GetInt("metadata_endpoints_max_hostname_size")) + pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size")) if err != nil { return "", fmt.Errorf("unable to retrieve clustername from GCE: %s", err) } @@ -140,7 +140,7 @@ var publicIPv4Fetcher = cachedfetch.Fetcher{ Name: "GCP Public IP", Attempt: func(ctx context.Context) (interface{}, error) { publicIPv4, err := getResponseWithMaxLength(ctx, metadataURL+"/instance/network-interfaces/0/access-configs/0/external-ip", - config.Datadog().GetInt("metadata_endpoints_max_hostname_size")) + pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size")) if err != nil { return "", fmt.Errorf("unable to retrieve public IPv4 from GCE: %s", err) } @@ -216,11 +216,11 @@ func getResponseWithMaxLength(ctx context.Context, endpoint string, maxLength in } func getResponse(ctx context.Context, url string) (string, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return "", fmt.Errorf("cloud provider is disabled by configuration") } - res, err := httputils.Get(ctx, url, map[string]string{"Metadata-Flavor": "Google"}, config.Datadog().GetDuration("gce_metadata_timeout")*time.Millisecond, config.Datadog()) + res, err := httputils.Get(ctx, url, map[string]string{"Metadata-Flavor": "Google"}, pkgconfigsetup.Datadog().GetDuration("gce_metadata_timeout")*time.Millisecond, pkgconfigsetup.Datadog()) if err != nil { return "", fmt.Errorf("GCE metadata API error: %s", err) } diff --git a/pkg/util/cloudproviders/gce/gce_tags.go b/pkg/util/cloudproviders/gce/gce_tags.go index 8cd524479b281..1f5b05011aab5 100644 --- a/pkg/util/cloudproviders/gce/gce_tags.go +++ b/pkg/util/cloudproviders/gce/gce_tags.go @@ -13,7 +13,7 @@ import ( "fmt" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" 
"github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -52,7 +52,7 @@ func getCachedTags(err error) ([]string, error) { // GetTags gets the tags from the GCE api func GetTags(ctx context.Context) ([]string, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return nil, fmt.Errorf("cloud provider is disabled by configuration") } @@ -85,7 +85,7 @@ func GetTags(ctx context.Context) ([]string, error) { } if metadata.Project.ProjectID != "" { tags = append(tags, fmt.Sprintf("project:%s", metadata.Project.ProjectID)) - if config.Datadog().GetBool("gce_send_project_id_tag") { + if pkgconfigsetup.Datadog().GetBool("gce_send_project_id_tag") { tags = append(tags, fmt.Sprintf("project_id:%s", metadata.Project.ProjectID)) } } @@ -110,7 +110,7 @@ func GetTags(ctx context.Context) ([]string, error) { // isAttributeExcluded returns whether the attribute key should be excluded from the tags func isAttributeExcluded(attr string) bool { - excludedAttributes := config.Datadog().GetStringSlice("exclude_gce_tags") + excludedAttributes := pkgconfigsetup.Datadog().GetStringSlice("exclude_gce_tags") for _, excluded := range excludedAttributes { if attr == excluded { return true diff --git a/pkg/util/cloudproviders/gce/gce_tags_test.go b/pkg/util/cloudproviders/gce/gce_tags_test.go index 4a7684fa93192..2eae7092d0b92 100644 --- a/pkg/util/cloudproviders/gce/gce_tags_test.go +++ b/pkg/util/cloudproviders/gce/gce_tags_test.go @@ -19,8 +19,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" ) @@ -107,8 +107,8 @@ func TestGetHostTagsWithProjectID(t *testing.T) { server := mockMetadataRequest(t) defer server.Close() defer cache.Cache.Delete(tagsCacheKey) - config.Datadog().SetWithoutSource("gce_send_project_id_tag", true) - defer config.Datadog().SetWithoutSource("gce_send_project_id_tag", false) + pkgconfigsetup.Datadog().SetWithoutSource("gce_send_project_id_tag", true) + defer pkgconfigsetup.Datadog().SetWithoutSource("gce_send_project_id_tag", false) tags, err := GetTags(ctx) require.NoError(t, err) testTags(t, tags, expectedTagsWithProjectID) diff --git a/pkg/util/cloudproviders/gce/gce_test.go b/pkg/util/cloudproviders/gce/gce_test.go index 2503edd0d95d1..501b5a8a1acde 100644 --- a/pkg/util/cloudproviders/gce/gce_test.go +++ b/pkg/util/cloudproviders/gce/gce_test.go @@ -15,7 +15,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func reset() { @@ -230,7 +230,7 @@ func TestGetNTPHosts(t *testing.T) { defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"gcp"}) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"gcp"}) actualHosts := GetNTPHosts(ctx) assert.Equal(t, expectedHosts, actualHosts) diff --git a/pkg/util/cloudproviders/ibm/ibm.go b/pkg/util/cloudproviders/ibm/ibm.go index f0cce17ce3cc5..00a7ef0c34e81 100644 --- a/pkg/util/cloudproviders/ibm/ibm.go +++ b/pkg/util/cloudproviders/ibm/ibm.go @@ -11,7 +11,7 @@ import ( "fmt" "time" - 
"github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cachedfetch" httputils "github.com/DataDog/datadog-agent/pkg/util/http" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -49,7 +49,7 @@ func getToken(ctx context.Context) (string, time.Time, error) { "Metadata-Flavor": "ibm", }, []byte("{\"expires_in\": 3600}"), - config.Datadog().GetDuration("ibm_metadata_timeout")*time.Second, config.Datadog()) + pkgconfigsetup.Datadog().GetDuration("ibm_metadata_timeout")*time.Second, pkgconfigsetup.Datadog()) if err != nil { token.ExpirationDate = time.Now() return "", time.Time{}, err @@ -82,7 +82,7 @@ func IsRunningOn(ctx context.Context) bool { var instanceIDFetcher = cachedfetch.Fetcher{ Name: "IBM instance name", Attempt: func(ctx context.Context) (interface{}, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return "", fmt.Errorf("IBM cloud provider is disabled by configuration") } @@ -96,7 +96,7 @@ var instanceIDFetcher = cachedfetch.Fetcher{ map[string]string{ "Authorization": fmt.Sprintf("Bearer %s", t), }, - config.Datadog().GetDuration("ibm_metadata_timeout")*time.Second, config.Datadog()) + pkgconfigsetup.Datadog().GetDuration("ibm_metadata_timeout")*time.Second, pkgconfigsetup.Datadog()) if err != nil { return nil, fmt.Errorf("IBM HostAliases: unable to query metadata endpoint: %s", err) } diff --git a/pkg/util/cloudproviders/kubernetes/kubernetes.go b/pkg/util/cloudproviders/kubernetes/kubernetes.go index 4354f5edcbd4a..20adb6239171e 100644 --- a/pkg/util/cloudproviders/kubernetes/kubernetes.go +++ b/pkg/util/cloudproviders/kubernetes/kubernetes.go @@ -11,8 +11,8 @@ import ( "context" "fmt" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/hostinfo" ) @@ -34,7 +34,7 @@ func GetHostAliases(ctx context.Context) ([]string, error) { return nil, fmt.Errorf("failed to get node annotations: %w", err) } - for _, annotation := range config.Datadog().GetStringSlice("kubernetes_node_annotations_as_host_aliases") { + for _, annotation := range pkgconfigsetup.Datadog().GetStringSlice("kubernetes_node_annotations_as_host_aliases") { if value, found := annotations[annotation]; found { aliases = append(aliases, value) } diff --git a/pkg/util/cloudproviders/network.go b/pkg/util/cloudproviders/network.go index 12c7496579c3f..80f9b1ae06ad0 100644 --- a/pkg/util/cloudproviders/network.go +++ b/pkg/util/cloudproviders/network.go @@ -9,7 +9,7 @@ import ( "context" "fmt" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/gce" "github.com/DataDog/datadog-agent/pkg/util/ec2" @@ -30,8 +30,8 @@ func GetNetworkID(ctx context.Context) (string, error) { return cache.Get[string]( networkIDCacheKey, func() (string, error) { - // the the id from configuration - if networkID := config.Datadog().GetString("network.id"); networkID != "" { + // the id from configuration + if networkID := pkgconfigsetup.Datadog().GetString("network.id"); networkID != "" { log.Debugf("GetNetworkID: using configured network ID: %s", networkID) return networkID, nil } diff --git 
a/pkg/util/cloudproviders/oracle/oracle.go b/pkg/util/cloudproviders/oracle/oracle.go index 407dbce972997..a63f595fb8794 100644 --- a/pkg/util/cloudproviders/oracle/oracle.go +++ b/pkg/util/cloudproviders/oracle/oracle.go @@ -10,7 +10,7 @@ import ( "fmt" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cachedfetch" httputils "github.com/DataDog/datadog-agent/pkg/util/http" ) @@ -35,12 +35,12 @@ func IsRunningOn(ctx context.Context) bool { var instanceIDFetcher = cachedfetch.Fetcher{ Name: "Oracle InstanceID", Attempt: func(ctx context.Context) (interface{}, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return "", fmt.Errorf("Oracle cloud provider is disabled by configuration") } endpoint := metadataURL + "/opc/v2/instance/id" - res, err := httputils.Get(ctx, endpoint, map[string]string{"Authorization": "Bearer Oracle"}, timeout, config.Datadog()) + res, err := httputils.Get(ctx, endpoint, map[string]string{"Authorization": "Bearer Oracle"}, timeout, pkgconfigsetup.Datadog()) if err != nil { return nil, fmt.Errorf("Oracle HostAliases: unable to query metadata endpoint: %s", err) } @@ -49,7 +49,7 @@ var instanceIDFetcher = cachedfetch.Fetcher{ return nil, fmt.Errorf("Oracle '%s' returned empty id", endpoint) } - maxLength := config.Datadog().GetInt("metadata_endpoints_max_hostname_size") + maxLength := pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size") if len(res) > maxLength { return nil, fmt.Errorf("%v gave a response with length > to %v", endpoint, maxLength) } diff --git a/pkg/util/cloudproviders/oracle/oracle_test.go b/pkg/util/cloudproviders/oracle/oracle_test.go index b429c66cac894..202623a29d277 100644 --- a/pkg/util/cloudproviders/oracle/oracle_test.go +++ b/pkg/util/cloudproviders/oracle/oracle_test.go @@ -15,13 +15,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestGetHostAliases(t *testing.T) { - holdValue := config.Datadog().Get("cloud_provider_metadata") - defer config.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) - config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"oracle"}) + holdValue := pkgconfigsetup.Datadog().Get("cloud_provider_metadata") + defer pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"oracle"}) ctx := context.Background() expected := "ocid1.instance.oc1.iad.anuwcljte6cuweqcz7sarpn43hst2kaaaxbbbccbaaa6vpd66tvcyhgiifsq" @@ -45,9 +45,9 @@ func TestGetHostAliases(t *testing.T) { } func TestGetNTPHosts(t *testing.T) { - holdValue := config.Datadog().Get("cloud_provider_metadata") - defer config.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) - config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"oracle"}) + holdValue := pkgconfigsetup.Datadog().Get("cloud_provider_metadata") + defer pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"oracle"}) ctx := context.Background() expectedHosts := []string{"169.254.169.254"} diff --git 
a/pkg/util/cloudproviders/tencent/tencent.go b/pkg/util/cloudproviders/tencent/tencent.go index cc792f30cb553..798ce358dcf76 100644 --- a/pkg/util/cloudproviders/tencent/tencent.go +++ b/pkg/util/cloudproviders/tencent/tencent.go @@ -10,7 +10,7 @@ import ( "fmt" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cachedfetch" httputils "github.com/DataDog/datadog-agent/pkg/util/http" ) @@ -44,7 +44,7 @@ func GetHostAliases(ctx context.Context) ([]string, error) { var instanceIDFetcher = cachedfetch.Fetcher{ Name: "Tencent InstanceID", Attempt: func(ctx context.Context) (interface{}, error) { - res, err := getMetadataItemWithMaxLength(ctx, metadataURL+"/meta-data/instance-id", config.Datadog().GetInt("metadata_endpoints_max_hostname_size")) + res, err := getMetadataItemWithMaxLength(ctx, metadataURL+"/meta-data/instance-id", pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size")) if err != nil { return "", fmt.Errorf("unable to get TencentCloud CVM instanceID: %s", err) } @@ -79,11 +79,11 @@ func getMetadataItemWithMaxLength(ctx context.Context, endpoint string, maxLengt } func getMetadataItem(ctx context.Context, endpoint string) (string, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return "", fmt.Errorf("cloud provider is disabled by configuration") } - res, err := httputils.Get(ctx, endpoint, nil, timeout, config.Datadog()) + res, err := httputils.Get(ctx, endpoint, nil, timeout, pkgconfigsetup.Datadog()) if err != nil { return "", fmt.Errorf("unable to fetch Tencent Metadata API, %s", err) } diff --git a/pkg/util/cloudproviders/tencent/tencent_test.go b/pkg/util/cloudproviders/tencent/tencent_test.go index e8f7f7cff39e3..b220a6527693d 100644 --- a/pkg/util/cloudproviders/tencent/tencent_test.go +++ b/pkg/util/cloudproviders/tencent/tencent_test.go @@ -15,14 +15,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestGetInstanceID(t *testing.T) { ctx := context.Background() - holdValue := config.Datadog().Get("cloud_provider_metadata") - defer config.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) - config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"tencent"}) + holdValue := pkgconfigsetup.Datadog().Get("cloud_provider_metadata") + defer pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"tencent"}) expected := "ins-nad6bga0" var lastRequest *http.Request @@ -42,9 +42,9 @@ func TestGetInstanceID(t *testing.T) { func TestGetHostAliases(t *testing.T) { ctx := context.Background() - holdValue := config.Datadog().Get("cloud_provider_metadata") - defer config.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) - config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"tencent"}) + holdValue := pkgconfigsetup.Datadog().Get("cloud_provider_metadata") + defer pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"tencent"}) expected := "ins-nad6bga0" var lastRequest *http.Request @@ -64,9 +64,9 @@ func TestGetHostAliases(t 
*testing.T) { } func TestGetNTPHosts(t *testing.T) { - holdValue := config.Datadog().Get("cloud_provider_metadata") - defer config.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) - config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"tencent"}) + holdValue := pkgconfigsetup.Datadog().Get("cloud_provider_metadata") + defer pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"tencent"}) ctx := context.Background() expectedHosts := []string{"ntpupdate.tencentyun.com"} diff --git a/pkg/util/clusteragent/clcrunner.go b/pkg/util/clusteragent/clcrunner.go index aa8ab93c7e589..31b6bbb83cbbd 100644 --- a/pkg/util/clusteragent/clcrunner.go +++ b/pkg/util/clusteragent/clcrunner.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -58,7 +58,7 @@ func GetCLCRunnerClient() (CLCRunnerClientInterface, error) { func (c *CLCRunnerClient) init() { c.initErr = nil - authToken, err := security.GetClusterAgentAuthToken(config.Datadog()) + authToken, err := security.GetClusterAgentAuthToken(pkgconfigsetup.Datadog()) if err != nil { c.initErr = err return @@ -74,7 +74,7 @@ func (c *CLCRunnerClient) init() { c.clcRunnerAPIClient.Timeout = 2 * time.Second // Set http port used by the CLC Runners - c.clcRunnerPort = config.Datadog().GetInt("cluster_checks.clc_runners_port") + c.clcRunnerPort = pkgconfigsetup.Datadog().GetInt("cluster_checks.clc_runners_port") } // GetVersion fetches the version of the CLC Runner diff --git a/pkg/util/clusteragent/clcrunner_test.go b/pkg/util/clusteragent/clcrunner_test.go index 849e5ffdea8bf..dce6ad6286bac 100644 --- a/pkg/util/clusteragent/clcrunner_test.go +++ b/pkg/util/clusteragent/clcrunner_test.go @@ -22,9 +22,9 @@ import ( "github.com/stretchr/testify/suite" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -50,7 +50,7 @@ func newDummyCLCRunner() (*dummyCLCRunner, error) { "/api/v1/clcrunner/stats": `{"http_check:My Nginx Service:b0041608e66d20ba":{"AverageExecutionTime":241,"MetricSamples":3},"kube_apiserver_metrics:c5d2d20ccb4bb880":{"AverageExecutionTime":858,"MetricSamples":1562},"":{"AverageExecutionTime":100,"MetricSamples":10}}`, "/api/v1/clcrunner/workers": `{"Count":2,"Instances":{"worker_1":{"Utilization":0.1},"worker_2":{"Utilization":0.2}}}`, }, - token: config.Datadog().GetString("cluster_agent.auth_token"), + token: pkgconfigsetup.Datadog().GetString("cluster_agent.auth_token"), requests: make(chan *http.Request, 100), } return clcRunner, nil @@ -233,7 +233,7 @@ func TestCLCRunnerSuite(t *testing.T) { }) s := &clcRunnerSuite{conf: configmock.New(t)} - config.Datadog().SetConfigFile(f.Name()) + pkgconfigsetup.Datadog().SetConfigFile(f.Name()) s.authTokenPath = filepath.Join(fakeDir, clcRunnerAuthTokenFilename) _, err = os.Stat(s.authTokenPath) require.NotNil(t, 
err, fmt.Sprintf("%v", err)) diff --git a/pkg/util/clusteragent/clusteragent.go b/pkg/util/clusteragent/clusteragent.go index 46b2ddb98eb81..ab6d18cb04aa5 100644 --- a/pkg/util/clusteragent/clusteragent.go +++ b/pkg/util/clusteragent/clusteragent.go @@ -25,7 +25,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/api/security" apiv1 "github.com/DataDog/datadog-agent/pkg/clusteragent/api/v1" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/errors" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -125,14 +125,14 @@ func (c *DCAClient) init() error { return err } - authToken, err := security.GetClusterAgentAuthToken(config.Datadog()) + authToken, err := security.GetClusterAgentAuthToken(pkgconfigsetup.Datadog()) if err != nil { return err } c.clusterAgentAPIRequestHeaders = http.Header{} c.clusterAgentAPIRequestHeaders.Set(authorizationHeaderKey, fmt.Sprintf("Bearer %s", authToken)) - podIP := config.Datadog().GetString("clc_runner_host") + podIP := pkgconfigsetup.Datadog().GetString("clc_runner_host") c.clusterAgentAPIRequestHeaders.Set(RealIPHeader, podIP) if err := c.initHTTPClient(); err != nil { @@ -140,7 +140,7 @@ func (c *DCAClient) init() error { } // Run DCA connection refresh - c.startReconnectHandler(time.Duration(config.Datadog().GetInt64("cluster_agent.client_reconnect_period_seconds")) * time.Second) + c.startReconnectHandler(time.Duration(pkgconfigsetup.Datadog().GetInt64("cluster_agent.client_reconnect_period_seconds")) * time.Second) log.Infof("Successfully connected to the Datadog Cluster Agent %s", c.clusterAgentVersion.String()) return nil @@ -228,7 +228,7 @@ func GetClusterAgentEndpoint() (string, error) { const configDcaURL = "cluster_agent.url" const configDcaSvcName = "cluster_agent.kubernetes_service_name" - dcaURL := config.Datadog().GetString(configDcaURL) + dcaURL := pkgconfigsetup.Datadog().GetString(configDcaURL) if dcaURL != "" { if strings.HasPrefix(dcaURL, "http://") { return "", fmt.Errorf("cannot get cluster agent endpoint, not a https scheme: %s", dcaURL) @@ -250,7 +250,7 @@ func GetClusterAgentEndpoint() (string, error) { // Construct the URL with the Kubernetes service environment variables // *_SERVICE_HOST and *_SERVICE_PORT - dcaSvc := config.Datadog().GetString(configDcaSvcName) + dcaSvc := pkgconfigsetup.Datadog().GetString(configDcaSvcName) log.Debugf("Identified service for the Datadog Cluster Agent: %s", dcaSvc) if dcaSvc == "" { return "", fmt.Errorf("cannot get a cluster agent endpoint, both %s and %s are empty", configDcaURL, configDcaSvcName) diff --git a/pkg/util/common.go b/pkg/util/common.go index 49c5c20504a31..5403e6b25cffb 100644 --- a/pkg/util/common.go +++ b/pkg/util/common.go @@ -18,7 +18,7 @@ import ( "time" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -176,13 +176,13 @@ func GetJSONSerializableMap(m interface{}) interface{} { // GetGoRoutinesDump returns the stack trace of every Go routine of a running Agent. 
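NOTE (illustrative aside, not part of the patch): the GetGoRoutinesDump hunk that follows shows the migration applied throughout this change set: call sites stop using the pkg/config facade and instead pass pkgconfigsetup.Datadog() explicitly into helpers from pkg/config/setup. A minimal sketch of that shape; reader, staticConfig, and goroutineDumpURL are hypothetical stand-ins, not names from the patch.

package main

import "fmt"

// reader stands in for the config interface satisfied by pkgconfigsetup.Datadog().
type reader interface {
	GetString(key string) string
}

// staticConfig is a toy implementation used only for this sketch.
type staticConfig map[string]string

func (c staticConfig) GetString(key string) string { return c[key] }

// goroutineDumpURL mirrors the post-change shape: the config is an explicit
// argument rather than a package-level global.
func goroutineDumpURL(cfg reader, ipcAddress string) string {
	return fmt.Sprintf("http://%v:%s/debug/pprof/goroutine?debug=2",
		ipcAddress, cfg.GetString("expvar_port"))
}

func main() {
	fmt.Println(goroutineDumpURL(staticConfig{"expvar_port": "5000"}, "localhost"))
}
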
func GetGoRoutinesDump() (string, error) { - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return "", err } pprofURL := fmt.Sprintf("http://%v:%s/debug/pprof/goroutine?debug=2", - ipcAddress, config.Datadog().GetString("expvar_port")) + ipcAddress, pkgconfigsetup.Datadog().GetString("expvar_port")) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() client := http.Client{} diff --git a/pkg/util/containerd/containerd_util.go b/pkg/util/containerd/containerd_util.go index 502b237db28eb..9b80304c894b4 100644 --- a/pkg/util/containerd/containerd_util.go +++ b/pkg/util/containerd/containerd_util.go @@ -18,8 +18,8 @@ import ( "github.com/opencontainers/image-spec/identity" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" dderrors "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/retry" @@ -91,9 +91,9 @@ func NewContainerdUtil() (ContainerdItf, error) { // (workloadmeta, checks, etc.) might need to fetch info from different // namespaces at the same time. containerdUtil := &ContainerdUtil{ - queryTimeout: config.Datadog().GetDuration("cri_query_timeout") * time.Second, - connectionTimeout: config.Datadog().GetDuration("cri_connection_timeout") * time.Second, - socketPath: config.Datadog().GetString("cri_socket_path"), + queryTimeout: pkgconfigsetup.Datadog().GetDuration("cri_query_timeout") * time.Second, + connectionTimeout: pkgconfigsetup.Datadog().GetDuration("cri_connection_timeout") * time.Second, + socketPath: pkgconfigsetup.Datadog().GetString("cri_socket_path"), } if containerdUtil.socketPath == "" { log.Info("No socket path was specified, defaulting to /var/run/containerd/containerd.sock") diff --git a/pkg/util/containerd/namespaces.go b/pkg/util/containerd/namespaces.go index 65e7dab453402..2647ce65501bc 100644 --- a/pkg/util/containerd/namespaces.go +++ b/pkg/util/containerd/namespaces.go @@ -11,14 +11,14 @@ import ( "context" "fmt" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NamespacesToWatch returns the namespaces to watch. If the // "containerd_namespace" option has been set, it returns the namespaces it contains. // Otherwise, it returns all of them. func NamespacesToWatch(ctx context.Context, containerdClient ContainerdItf) ([]string, error) { - if namespaces := config.Datadog().GetStringSlice("containerd_namespaces"); len(namespaces) > 0 { + if namespaces := pkgconfigsetup.Datadog().GetStringSlice("containerd_namespaces"); len(namespaces) > 0 { return namespaces, nil } @@ -27,7 +27,7 @@ func NamespacesToWatch(ctx context.Context, containerdClient ContainerdItf) ([]s return nil, err } - excludeNamespaces := config.Datadog().GetStringSlice("containerd_exclude_namespaces") + excludeNamespaces := pkgconfigsetup.Datadog().GetStringSlice("containerd_exclude_namespaces") if len(excludeNamespaces) == 0 { return namespaces, nil } @@ -55,8 +55,8 @@ func NamespacesToWatch(ctx context.Context, containerdClient ContainerdItf) ([]s // namespace that we need to watch is "ns1", this function returns // `topic=="/container/create",namespace=="ns1"`. 
func FiltersWithNamespaces(filters []string) []string { - namespaces := config.Datadog().GetStringSlice("containerd_namespaces") - excludeNamespaces := config.Datadog().GetStringSlice("containerd_exclude_namespaces") + namespaces := pkgconfigsetup.Datadog().GetStringSlice("containerd_namespaces") + excludeNamespaces := pkgconfigsetup.Datadog().GetStringSlice("containerd_exclude_namespaces") if len(namespaces) == 0 && len(excludeNamespaces) == 0 { // Watch all namespaces. No need to add them to the filters. diff --git a/pkg/util/containerd/namespaces_test.go b/pkg/util/containerd/namespaces_test.go index 373e1903f9ebe..451e4282ee62d 100644 --- a/pkg/util/containerd/namespaces_test.go +++ b/pkg/util/containerd/namespaces_test.go @@ -12,7 +12,7 @@ import ( "errors" "testing" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containerd/fake" "github.com/stretchr/testify/assert" @@ -68,16 +68,16 @@ func TestNamespacesToWatch(t *testing.T) { }, } - originalContainerdNamespacesOpt := config.Datadog().GetStringSlice("containerd_namespaces") - originalExcludeNamespacesOpt := config.Datadog().GetStringSlice("containerd_exclude_namespaces") + originalContainerdNamespacesOpt := pkgconfigsetup.Datadog().GetStringSlice("containerd_namespaces") + originalExcludeNamespacesOpt := pkgconfigsetup.Datadog().GetStringSlice("containerd_exclude_namespaces") for _, test := range tests { t.Run(test.name, func(t *testing.T) { - config.Datadog().SetWithoutSource("containerd_namespaces", test.containerdNamespaceVal) - defer config.Datadog().SetWithoutSource("containerd_namespaces", originalContainerdNamespacesOpt) + pkgconfigsetup.Datadog().SetWithoutSource("containerd_namespaces", test.containerdNamespaceVal) + defer pkgconfigsetup.Datadog().SetWithoutSource("containerd_namespaces", originalContainerdNamespacesOpt) - config.Datadog().SetWithoutSource("containerd_exclude_namespaces", test.excludeNamespaceVal) - defer config.Datadog().SetWithoutSource("containerd_exclude_namespaces", originalExcludeNamespacesOpt) + pkgconfigsetup.Datadog().SetWithoutSource("containerd_exclude_namespaces", test.excludeNamespaceVal) + defer pkgconfigsetup.Datadog().SetWithoutSource("containerd_exclude_namespaces", originalExcludeNamespacesOpt) namespaces, err := NamespacesToWatch(context.TODO(), test.client) @@ -154,16 +154,16 @@ func TestFiltersWithNamespaces(t *testing.T) { }, } - originalContainerdNamespacesOpt := config.Datadog().GetStringSlice("containerd_namespaces") - originalExcludeNamespacesOpt := config.Datadog().GetStringSlice("containerd_exclude_namespaces") + originalContainerdNamespacesOpt := pkgconfigsetup.Datadog().GetStringSlice("containerd_namespaces") + originalExcludeNamespacesOpt := pkgconfigsetup.Datadog().GetStringSlice("containerd_exclude_namespaces") for _, test := range tests { t.Run(test.name, func(t *testing.T) { - config.Datadog().SetWithoutSource("containerd_namespaces", test.containerdNamespaceVal) - defer config.Datadog().SetWithoutSource("containerd_namespaces", originalContainerdNamespacesOpt) + pkgconfigsetup.Datadog().SetWithoutSource("containerd_namespaces", test.containerdNamespaceVal) + defer pkgconfigsetup.Datadog().SetWithoutSource("containerd_namespaces", originalContainerdNamespacesOpt) - config.Datadog().SetWithoutSource("containerd_exclude_namespaces", test.excludeNamespaceVal) - defer config.Datadog().SetWithoutSource("containerd_exclude_namespaces", originalExcludeNamespacesOpt) + 
pkgconfigsetup.Datadog().SetWithoutSource("containerd_exclude_namespaces", test.excludeNamespaceVal) + defer pkgconfigsetup.Datadog().SetWithoutSource("containerd_exclude_namespaces", originalExcludeNamespacesOpt) result := FiltersWithNamespaces(test.inputFilters) assert.ElementsMatch(t, test.expectedFilters, result) diff --git a/pkg/util/containers/cri/util.go b/pkg/util/containers/cri/util.go index 5c4fb3769733d..d4f6d6b53d164 100644 --- a/pkg/util/containers/cri/util.go +++ b/pkg/util/containers/cri/util.go @@ -23,7 +23,7 @@ import ( criv1alpha2 "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "github.com/DataDog/datadog-agent/internal/third_party/kubernetes/pkg/kubelet/cri/remote/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/retry" ) @@ -112,9 +112,9 @@ func (c *CRIUtil) init() error { func GetUtil() (*CRIUtil, error) { once.Do(func() { globalCRIUtil = &CRIUtil{ - queryTimeout: config.Datadog().GetDuration("cri_query_timeout") * time.Second, - connectionTimeout: config.Datadog().GetDuration("cri_connection_timeout") * time.Second, - socketPath: config.Datadog().GetString("cri_socket_path"), + queryTimeout: pkgconfigsetup.Datadog().GetDuration("cri_query_timeout") * time.Second, + connectionTimeout: pkgconfigsetup.Datadog().GetDuration("cri_connection_timeout") * time.Second, + socketPath: pkgconfigsetup.Datadog().GetString("cri_socket_path"), } globalCRIUtil.initRetry.SetupRetrier(&retry.Config{ //nolint:errcheck Name: "criutil", diff --git a/pkg/util/containers/metrics/docker/collector_linux.go b/pkg/util/containers/metrics/docker/collector_linux.go index b2cb0cb96b5f6..b82b78c737399 100644 --- a/pkg/util/containers/metrics/docker/collector_linux.go +++ b/pkg/util/containers/metrics/docker/collector_linux.go @@ -12,7 +12,7 @@ import ( "github.com/docker/docker/api/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cgroups" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/system" @@ -77,7 +77,7 @@ func convertIOStats(ioStats *types.BlkioStats) *provider.ContainerIOStats { Devices: make(map[string]provider.DeviceIOStats), } - procPath := config.Datadog().GetString("container_proc_root") + procPath := pkgconfigsetup.Datadog().GetString("container_proc_root") deviceMapping, err := system.GetDiskDeviceMapping(procPath) if err != nil { log.Debugf("Error while getting disk mapping, no disk metric will be present, err: %v", err) diff --git a/pkg/util/containers/metrics/docker/collector_linux_test.go b/pkg/util/containers/metrics/docker/collector_linux_test.go index 1db2a10a40491..1588d66cd4821 100644 --- a/pkg/util/containers/metrics/docker/collector_linux_test.go +++ b/pkg/util/containers/metrics/docker/collector_linux_test.go @@ -11,7 +11,7 @@ import ( "os" "testing" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/pointer" "github.com/DataDog/datadog-agent/pkg/util/system" @@ -190,7 +190,7 @@ func Test_convertIOStats(t *testing.T) { assert.Nil(t, err) defer os.Remove(dir + "/diskstats") - config.Datadog().SetWithoutSource("container_proc_root", dir) + 
pkgconfigsetup.Datadog().SetWithoutSource("container_proc_root", dir) for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/pkg/util/containers/metrics/system/collector_linux.go b/pkg/util/containers/metrics/system/collector_linux.go index 9eec6d9846a2d..0c8eabdfd7f91 100644 --- a/pkg/util/containers/metrics/system/collector_linux.go +++ b/pkg/util/containers/metrics/system/collector_linux.go @@ -17,8 +17,8 @@ import ( "github.com/hashicorp/go-multierror" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cgroups" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -58,12 +58,12 @@ func newSystemCollector(cache *provider.Cache, wlm optional.Option[workloadmeta. var collectorMetadata provider.CollectorMetadata var cf cgroups.ReaderFilter - procPath := config.Datadog().GetString("container_proc_root") + procPath := pkgconfigsetup.Datadog().GetString("container_proc_root") if strings.HasPrefix(procPath, "/host") { hostPrefix = "/host" } - if useTrie := config.Datadog().GetBool("use_improved_cgroup_parser"); useTrie { + if useTrie := pkgconfigsetup.Datadog().GetBool("use_improved_cgroup_parser"); useTrie { var w workloadmeta.Component unwrapped, ok := wlm.Get() if ok { @@ -80,7 +80,7 @@ func newSystemCollector(cache *provider.Cache, wlm optional.Option[workloadmeta. cgroups.WithProcPath(procPath), cgroups.WithHostPrefix(hostPrefix), cgroups.WithReaderFilter(cf), - cgroups.WithPIDMapper(config.Datadog().GetString("container_pid_mapper")), + cgroups.WithPIDMapper(pkgconfigsetup.Datadog().GetString("container_pid_mapper")), ) if err != nil { // Cgroup provider is pretty static. Except not having required mounts, it should always work. 
diff --git a/pkg/util/core.go b/pkg/util/core.go index f26db5e2b934d..77649ea1d53f3 100644 --- a/pkg/util/core.go +++ b/pkg/util/core.go @@ -13,11 +13,11 @@ import ( "golang.org/x/sys/unix" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) // SetupCoreDump enables core dumps and sets the core dump size limit based on configuration -func SetupCoreDump(cfg config.Reader) error { +func SetupCoreDump(cfg model.Reader) error { if cfg.GetBool("go_core_dump") { debug.SetTraceback("crash") diff --git a/pkg/util/core_windows.go b/pkg/util/core_windows.go index 483290c592d47..c11422c8aff2b 100644 --- a/pkg/util/core_windows.go +++ b/pkg/util/core_windows.go @@ -8,11 +8,11 @@ package util import ( "fmt" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) // SetupCoreDump enables core dumps and sets the core dump size limit based on configuration -func SetupCoreDump(cfg config.Reader) error { +func SetupCoreDump(cfg model.Reader) error { if cfg.GetBool("go_core_dump") { return fmt.Errorf("Not supported on Windows") } diff --git a/pkg/util/crashreport/crashreport.go b/pkg/util/crashreport/crashreport.go index b49529a835037..0bab9ca167051 100644 --- a/pkg/util/crashreport/crashreport.go +++ b/pkg/util/crashreport/crashreport.go @@ -13,7 +13,7 @@ import ( sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/system/wincrashdetect/probe" - dd_config "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" process_net "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/retry" @@ -102,7 +102,7 @@ func (wcr *WinCrashReporter) CheckForCrash() (*probe.WinCrashStatus, error) { return nil, nil } sysProbeUtil, err := process_net.GetRemoteSystemProbeUtil( - dd_config.SystemProbe().GetString("system_probe_config.sysprobe_socket")) + pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")) if err != nil { return nil, wcr.handleStartupError(err) } diff --git a/pkg/util/docker/docker_util.go b/pkg/util/docker/docker_util.go index 7fd5c6f4f6fa4..29503d9ebef4f 100644 --- a/pkg/util/docker/docker_util.go +++ b/pkg/util/docker/docker_util.go @@ -25,7 +25,7 @@ import ( "github.com/docker/docker/client" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" dderrors "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/containers" @@ -53,7 +53,7 @@ type DockerUtil struct { // init makes an empty DockerUtil bootstrap itself. // This is not exposed as public API but is called by the retrier embed. 
func (d *DockerUtil) init() error { - d.queryTimeout = config.Datadog().GetDuration("docker_query_timeout") * time.Second + d.queryTimeout = pkgconfigsetup.Datadog().GetDuration("docker_query_timeout") * time.Second // Major failure risk is here, do that first ctx, cancel := context.WithTimeout(context.Background(), d.queryTimeout) diff --git a/pkg/util/docker/event_stream.go b/pkg/util/docker/event_stream.go index 9d2f28def0d39..3fafe14fc28c5 100644 --- a/pkg/util/docker/event_stream.go +++ b/pkg/util/docker/event_stream.go @@ -17,7 +17,7 @@ import ( "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -157,7 +157,7 @@ func eventFilters() filters.Args { res.Add("event", string(containerEventAction)) } - if config.Datadog().GetBool("container_image.enabled") { + if pkgconfigsetup.Datadog().GetBool("container_image.enabled") { res.Add("type", string(events.ImageEventType)) for _, imageEventAction := range imageEventActions { res.Add("event", string(imageEventAction)) diff --git a/pkg/util/ec2/dmi.go b/pkg/util/ec2/dmi.go index a56c4d5f17595..8e8e99c49d36c 100644 --- a/pkg/util/ec2/dmi.go +++ b/pkg/util/ec2/dmi.go @@ -9,14 +9,15 @@ import ( "fmt" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/google/uuid" + + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/dmi" "github.com/DataDog/datadog-agent/pkg/util/fargate" - "github.com/google/uuid" ) func isBoardVendorEC2() bool { - if !config.Datadog().GetBool("ec2_use_dmi") { + if !pkgconfigsetup.Datadog().GetBool("ec2_use_dmi") { return false } return dmi.GetBoardVendor() == DMIBoardVendor @@ -32,7 +33,7 @@ func getInstanceIDFromDMI() (string, error) { return "", fmt.Errorf("host alias detection through DMI is disabled on Fargate") } - if !config.Datadog().GetBool("ec2_use_dmi") { + if !pkgconfigsetup.Datadog().GetBool("ec2_use_dmi") { return "", fmt.Errorf("'ec2_use_dmi' is disabled") } @@ -57,7 +58,7 @@ func getInstanceIDFromDMI() (string, error) { // Depending on the instance type either the DMI product UUID or the hypervisor UUID is available. In both case, if they // start with "ec2" we return true. 
func isEC2UUID() bool { - if !config.Datadog().GetBool("ec2_use_dmi") { + if !pkgconfigsetup.Datadog().GetBool("ec2_use_dmi") { return false } diff --git a/pkg/util/ec2/dmi_test.go b/pkg/util/ec2/dmi_test.go index e21cc6657778b..cdd9068d0bd7e 100644 --- a/pkg/util/ec2/dmi_test.go +++ b/pkg/util/ec2/dmi_test.go @@ -8,15 +8,16 @@ package ec2 import ( "testing" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/stretchr/testify/assert" + configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/dmi" - "github.com/stretchr/testify/assert" ) func TestIsBoardVendorEC2(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) setupDMIForNotEC2(t) assert.False(t, isBoardVendorEC2()) @@ -25,13 +26,13 @@ func TestIsBoardVendorEC2(t *testing.T) { assert.True(t, isBoardVendorEC2()) configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", false) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", false) assert.False(t, isBoardVendorEC2()) } func TestGetInstanceIDFromDMI(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) setupDMIForNotEC2(t) instanceID, err := getInstanceIDFromDMI() @@ -44,14 +45,14 @@ func TestGetInstanceIDFromDMI(t *testing.T) { assert.Equal(t, "i-myinstance", instanceID) configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", false) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", false) _, err = getInstanceIDFromDMI() assert.Error(t, err) } func TestIsEC2UUID(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) // no UUID dmi.SetupMock(t, "", "", "", "") @@ -78,7 +79,7 @@ func TestIsEC2UUID(t *testing.T) { func TestIsEC2UUIDSwapEndian(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) // hypervisor dmi.SetupMock(t, "45E12AEC-DCD1-B213-94ED-012345ABCDEF", "", "", "") diff --git a/pkg/util/ec2/ec2.go b/pkg/util/ec2/ec2.go index d4c8025ced09f..05cef31e3488c 100644 --- a/pkg/util/ec2/ec2.go +++ b/pkg/util/ec2/ec2.go @@ -14,8 +14,8 @@ import ( "sync" "time" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cachedfetch" httputils "github.com/DataDog/datadog-agent/pkg/util/http" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -192,7 +192,7 @@ func GetNTPHosts(ctx context.Context) []string { // GetClusterName returns the name of the cluster containing the current EC2 instance func GetClusterName(ctx context.Context) (string, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return "", fmt.Errorf("cloud provider is disabled by configuration") } tags, err := fetchTagsFromCache(ctx) @@ -222,7 +222,7 @@ func extractClusterName(tags []string) (string, error) { // IsDefaultHostname returns whether the given hostname is a default one for EC2 func IsDefaultHostname(hostname string) bool { - return isDefaultHostname(hostname, 
config.Datadog().GetBool("ec2_use_windows_prefix_detection")) + return isDefaultHostname(hostname, pkgconfigsetup.Datadog().GetBool("ec2_use_windows_prefix_detection")) } // IsDefaultHostnameForIntake returns whether the given hostname is a default one for EC2 for the intake diff --git a/pkg/util/ec2/ec2_account_id.go b/pkg/util/ec2/ec2_account_id.go index 32e9cfb9463c3..4316462e00e2e 100644 --- a/pkg/util/ec2/ec2_account_id.go +++ b/pkg/util/ec2/ec2_account_id.go @@ -11,12 +11,12 @@ import ( "context" "fmt" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // GetAccountID returns the account ID of the current AWS instance func GetAccountID(ctx context.Context) (string, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return "", fmt.Errorf("cloud provider is disabled by configuration") } diff --git a/pkg/util/ec2/ec2_tags.go b/pkg/util/ec2/ec2_tags.go index ecd05fa8cda7f..d60f93bf6256b 100644 --- a/pkg/util/ec2/ec2_tags.go +++ b/pkg/util/ec2/ec2_tags.go @@ -19,7 +19,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/ec2/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -31,7 +31,7 @@ var ( ) func isTagExcluded(tag string) bool { - if excludedTags := config.Datadog().GetStringSlice("exclude_ec2_tags"); excludedTags != nil { + if excludedTags := pkgconfigsetup.Datadog().GetStringSlice("exclude_ec2_tags"); excludedTags != nil { for _, excludedTag := range excludedTags { if tag == excludedTag { return true @@ -42,7 +42,7 @@ func isTagExcluded(tag string) bool { } func fetchEc2Tags(ctx context.Context) ([]string, error) { - if config.Datadog().GetBool("collect_ec2_tags_use_imds") { + if pkgconfigsetup.Datadog().GetBool("collect_ec2_tags_use_imds") { // prefer to fetch tags from IMDS, falling back to the API tags, err := fetchEc2TagsFromIMDS(ctx) if err == nil { @@ -123,7 +123,7 @@ func getTagsWithCreds(ctx context.Context, instanceIdentity *EC2Identity, awsCre // We want to use 'ec2_metadata_timeout' here instead of current context. 'ctx' comes from the agent main and will // only be canceled if the agent is stopped. The default timeout for the AWS SDK is 1 minutes (20s timeout with // 3 retries). Since we call getTagsWithCreds twice in a row, it can be a 2 minutes latency. 
- ctx, cancel := context.WithTimeout(ctx, config.Datadog().GetDuration("ec2_metadata_timeout")*time.Millisecond) + ctx, cancel := context.WithTimeout(ctx, pkgconfigsetup.Datadog().GetDuration("ec2_metadata_timeout")*time.Millisecond) defer cancel() ec2Tags, err := connection.DescribeTags(ctx, @@ -155,7 +155,7 @@ func getTagsWithCreds(ctx context.Context, instanceIdentity *EC2Identity, awsCre var fetchTags = fetchEc2Tags func fetchTagsFromCache(ctx context.Context) ([]string, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return nil, fmt.Errorf("cloud provider is disabled by configuration") } diff --git a/pkg/util/ec2/ec2_tags_test.go b/pkg/util/ec2/ec2_tags_test.go index 5707990bb698e..cce8a5800df2c 100644 --- a/pkg/util/ec2/ec2_tags_test.go +++ b/pkg/util/ec2/ec2_tags_test.go @@ -19,8 +19,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" ) @@ -38,7 +38,7 @@ func TestGetIAMRole(t *testing.T) { })) defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() val, err := getIAMRole(ctx) @@ -63,7 +63,7 @@ func TestGetSecurityCreds(t *testing.T) { })) defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() cred, err := getSecurityCreds(ctx) @@ -83,7 +83,7 @@ func TestGetInstanceIdentity(t *testing.T) { })) defer ts.Close() instanceIdentityURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() val, err := GetInstanceIdentity(ctx) @@ -112,7 +112,7 @@ func TestFetchEc2TagsFromIMDS(t *testing.T) { })) defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() confMock := configmock.New(t) @@ -133,7 +133,7 @@ func TestFetchEc2TagsFromIMDSError(t *testing.T) { })) defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() _, err := fetchEc2TagsFromIMDS(ctx) diff --git a/pkg/util/ec2/ec2_test.go b/pkg/util/ec2/ec2_test.go index d92242dada46f..a37576ee1f1c0 100644 --- a/pkg/util/ec2/ec2_test.go +++ b/pkg/util/ec2/ec2_test.go @@ -18,20 +18,22 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/dmi" httputils "github.com/DataDog/datadog-agent/pkg/util/http" ) var ( - initialTimeout = time.Duration(config.Datadog().GetInt("ec2_metadata_timeout")) * time.Millisecond + initialTimeout = time.Duration(pkgconfigsetup.Datadog().GetInt("ec2_metadata_timeout")) * time.Millisecond initialMetadataURL = 
metadataURL initialTokenURL = tokenURL ) +const testIMDSToken = "AQAAAFKw7LyqwVmmBMkqXHpDBuDWw2GnfGswTHi2yiIOGvzD7OMaWw==" + func resetPackageVars() { - config.Datadog().SetWithoutSource("ec2_metadata_timeout", initialTimeout) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", initialTimeout) metadataURL = initialMetadataURL tokenURL = initialTokenURL token = httputils.NewAPIToken(getToken) @@ -53,11 +55,11 @@ func setupDMIForNotEC2(t *testing.T) { func TestIsDefaultHostname(t *testing.T) { const key = "ec2_use_windows_prefix_detection" - prefixDetection := config.Datadog().GetBool(key) - defer config.Datadog().SetDefault(key, prefixDetection) + prefixDetection := pkgconfigsetup.Datadog().GetBool(key) + defer pkgconfigsetup.Datadog().SetDefault(key, prefixDetection) for _, prefix := range []bool{true, false} { - config.Datadog().SetDefault(key, prefix) + pkgconfigsetup.Datadog().SetDefault(key, prefix) assert.True(t, IsDefaultHostname("IP-FOO")) assert.True(t, IsDefaultHostname("domuarigato")) @@ -68,9 +70,9 @@ func TestIsDefaultHostname(t *testing.T) { func TestIsDefaultHostnameForIntake(t *testing.T) { const key = "ec2_use_windows_prefix_detection" - prefixDetection := config.Datadog().GetBool(key) - config.Datadog().SetDefault(key, true) - defer config.Datadog().SetDefault(key, prefixDetection) + prefixDetection := pkgconfigsetup.Datadog().GetBool(key) + pkgconfigsetup.Datadog().SetDefault(key, true) + defer pkgconfigsetup.Datadog().SetDefault(key, prefixDetection) assert.True(t, IsDefaultHostnameForIntake("IP-FOO")) assert.True(t, IsDefaultHostnameForIntake("domuarigato")) @@ -91,7 +93,7 @@ func TestGetInstanceID(t *testing.T) { })) defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() // API errors out, should return error @@ -166,9 +168,9 @@ func TestGetHostAliases(t *testing.T) { configmock.New(t) if tc.disableDMI { - config.Datadog().SetWithoutSource("ec2_use_dmi", false) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", false) } else { - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { @@ -185,7 +187,7 @@ func TestGetHostAliases(t *testing.T) { defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() ctx := context.Background() @@ -211,7 +213,7 @@ func TestGetHostname(t *testing.T) { })) defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() // API errors out, should return error @@ -301,12 +303,11 @@ func TestExtractClusterName(t *testing.T) { func TestGetToken(t *testing.T) { ctx := context.Background() - originalToken := "AQAAAFKw7LyqwVmmBMkqXHpDBuDWw2GnfGswTHi2yiIOGvzD7OMaWw==" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") h := r.Header.Get("X-aws-ec2-metadata-token-ttl-seconds") if h != "" && r.Method == http.MethodPut { - io.WriteString(w, originalToken) + io.WriteString(w, testIMDSToken) } else { w.WriteHeader(http.StatusNotFound) } @@ -314,12 +315,12 @@ func 
TestGetToken(t *testing.T) { defer ts.Close() tokenURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() token, err := token.Get(ctx) require.NoError(t, err) - assert.Equal(t, originalToken, token) + assert.Equal(t, testIMDSToken, token) } func TestMetedataRequestWithToken(t *testing.T) { @@ -327,11 +328,10 @@ func TestMetedataRequestWithToken(t *testing.T) { var requestForToken *http.Request var requestWithToken *http.Request var seq int - config.Datadog().SetDefault("ec2_prefer_imdsv2", true) + pkgconfigsetup.Datadog().SetDefault("ec2_prefer_imdsv2", true) ctx := context.Background() ipv4 := "198.51.100.1" - tok := "AQAAAFKw7LyqwVmmBMkqXHpDBuDWw2GnfGswTHi2yiIOGvzD7OMaWw==" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") @@ -345,11 +345,11 @@ func TestMetedataRequestWithToken(t *testing.T) { r.Header.Add("X-sequence", fmt.Sprintf("%v", seq)) seq++ requestForToken = r - io.WriteString(w, tok) + io.WriteString(w, testIMDSToken) case http.MethodGet: // Should be a metadata request t := r.Header.Get("X-aws-ec2-metadata-token") - if t != tok { + if t != testIMDSToken { r.Header.Add("X-sequence", fmt.Sprintf("%v", seq)) seq++ requestWithoutToken = r @@ -372,7 +372,7 @@ func TestMetedataRequestWithToken(t *testing.T) { defer ts.Close() metadataURL = ts.URL tokenURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() ips, err := GetPublicIPv4(ctx) @@ -383,10 +383,10 @@ func TestMetedataRequestWithToken(t *testing.T) { assert.Equal(t, "0", requestForToken.Header.Get("X-sequence")) assert.Equal(t, "1", requestWithToken.Header.Get("X-sequence")) - assert.Equal(t, fmt.Sprint(config.Datadog().GetInt("ec2_metadata_token_lifetime")), requestForToken.Header.Get("X-aws-ec2-metadata-token-ttl-seconds")) + assert.Equal(t, fmt.Sprint(pkgconfigsetup.Datadog().GetInt("ec2_metadata_token_lifetime")), requestForToken.Header.Get("X-aws-ec2-metadata-token-ttl-seconds")) assert.Equal(t, http.MethodPut, requestForToken.Method) assert.Equal(t, "/", requestForToken.RequestURI) - assert.Equal(t, tok, requestWithToken.Header.Get("X-aws-ec2-metadata-token")) + assert.Equal(t, testIMDSToken, requestWithToken.Header.Get("X-aws-ec2-metadata-token")) assert.Equal(t, "/public-ipv4", requestWithToken.RequestURI) assert.Equal(t, http.MethodGet, requestWithToken.Method) @@ -411,7 +411,7 @@ func TestMetedataRequestWithToken(t *testing.T) { func TestMetedataRequestWithoutToken(t *testing.T) { var requestWithoutToken *http.Request - config.Datadog().SetDefault("ec2_prefer_imdsv2", false) + pkgconfigsetup.Datadog().SetDefault("ec2_prefer_imdsv2", false) ipv4 := "198.51.100.1" @@ -438,7 +438,7 @@ func TestMetedataRequestWithoutToken(t *testing.T) { defer ts.Close() metadataURL = ts.URL tokenURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() ips, err := GetPublicIPv4(context.Background()) @@ -464,7 +464,7 @@ func TestGetNTPHostsFromIMDS(t *testing.T) { func TestGetNTPHostsDMI(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) setupDMIForEC2(t) defer resetPackageVars() @@ -476,7 
+476,7 @@ func TestGetNTPHostsDMI(t *testing.T) { func TestGetNTPHostsEC2UUID(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) dmi.SetupMock(t, "ec2something", "", "", "") defer resetPackageVars() @@ -488,7 +488,7 @@ func TestGetNTPHostsEC2UUID(t *testing.T) { func TestGetNTPHostsDisabledDMI(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", false) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", false) // DMI without EC2 UUID dmi.SetupMock(t, "something", "something", "i-myinstance", DMIBoardVendor) @@ -515,7 +515,7 @@ func TestMetadataSourceIMDS(t *testing.T) { w.Header().Set("Content-Type", "text/plain") switch r.Method { case http.MethodPut: // token request - io.WriteString(w, "AQAAAFKw7LyqwVmmBMkqXHpDBuDWw2GnfGswTHi2yiIOGvzD7OMaWw==") + io.WriteString(w, testIMDSToken) case http.MethodGet: // metadata request switch r.RequestURI { case "/hostname": @@ -533,8 +533,8 @@ func TestMetadataSourceIMDS(t *testing.T) { tokenURL = ts.URL defer resetPackageVars() configmock.New(t) - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) - config.Datadog().SetWithoutSource("ec2_prefer_imdsv2", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_prefer_imdsv2", true) assert.True(t, IsRunningOn(ctx)) assert.Equal(t, metadataSourceIMDSv2, currentMetadataSource) @@ -542,7 +542,7 @@ func TestMetadataSourceIMDS(t *testing.T) { // trying IMDSv1 hostnameFetcher.Reset() currentMetadataSource = metadataSourceNone - config.Datadog().SetWithoutSource("ec2_prefer_imdsv2", false) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_prefer_imdsv2", false) assert.True(t, IsRunningOn(ctx)) assert.Equal(t, metadataSourceIMDSv1, currentMetadataSource) @@ -550,7 +550,7 @@ func TestMetadataSourceIMDS(t *testing.T) { func TestMetadataSourceUUID(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) ctx := context.Background() @@ -572,7 +572,7 @@ func TestMetadataSourceUUID(t *testing.T) { func TestMetadataSourceDMI(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) ctx := context.Background() @@ -586,7 +586,7 @@ func TestMetadataSourceDMI(t *testing.T) { func TestMetadataSourceDMIPreventFallback(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) ctx := context.Background() diff --git a/pkg/util/ec2/imds_helpers.go b/pkg/util/ec2/imds_helpers.go index 510fad39f43c4..845d75b9c1471 100644 --- a/pkg/util/ec2/imds_helpers.go +++ b/pkg/util/ec2/imds_helpers.go @@ -10,7 +10,7 @@ import ( "fmt" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" httputils "github.com/DataDog/datadog-agent/pkg/util/http" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -25,7 +25,7 @@ var ( ) func getToken(ctx context.Context) (string, time.Time, error) { - tokenLifetime := time.Duration(config.Datadog().GetInt("ec2_metadata_token_lifetime")) * time.Second + tokenLifetime := time.Duration(pkgconfigsetup.Datadog().GetInt("ec2_metadata_token_lifetime")) * time.Second // Set the local expiration date before requesting the metadata 
endpoint so the local expiration date will always // expire before the expiration date computed on the AWS side. The expiration date is set minus the renewal window // to ensure the token will be refreshed before it expires. @@ -37,7 +37,7 @@ func getToken(ctx context.Context) (string, time.Time, error) { "X-aws-ec2-metadata-token-ttl-seconds": fmt.Sprintf("%d", int(tokenLifetime.Seconds())), }, nil, - config.Datadog().GetDuration("ec2_metadata_timeout")*time.Millisecond, config.Datadog()) + pkgconfigsetup.Datadog().GetDuration("ec2_metadata_timeout")*time.Millisecond, pkgconfigsetup.Datadog()) if err != nil { return "", time.Now(), err } @@ -50,7 +50,7 @@ func getMetadataItemWithMaxLength(ctx context.Context, endpoint string, forceIMD return result, err } - maxLength := config.Datadog().GetInt("metadata_endpoints_max_hostname_size") + maxLength := pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size") if len(result) > maxLength { return "", fmt.Errorf("%v gave a response with length > to %v", endpoint, maxLength) } @@ -58,7 +58,7 @@ func getMetadataItemWithMaxLength(ctx context.Context, endpoint string, forceIMD } func getMetadataItem(ctx context.Context, endpoint string, forceIMDSv2 bool) (string, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return "", fmt.Errorf("cloud provider is disabled by configuration") } @@ -67,7 +67,7 @@ func getMetadataItem(ctx context.Context, endpoint string, forceIMDSv2 bool) (st // UseIMDSv2 returns true if the agent should use IMDSv2 func UseIMDSv2(forceIMDSv2 bool) bool { - return config.Datadog().GetBool("ec2_prefer_imdsv2") || forceIMDSv2 + return pkgconfigsetup.Datadog().GetBool("ec2_prefer_imdsv2") || forceIMDSv2 } func doHTTPRequest(ctx context.Context, url string, forceIMDSv2 bool) (string, error) { @@ -77,7 +77,7 @@ func doHTTPRequest(ctx context.Context, url string, forceIMDSv2 bool) (string, e tokenValue, err := token.Get(ctx) if err != nil { if forceIMDSv2 { - return "", fmt.Errorf("Could not fetch token from IMDSv2") + return "", fmt.Errorf("could not fetch token from IMDSv2") } log.Warnf("ec2_prefer_imdsv2 is set to true in the configuration but the agent was unable to proceed: %s", err) } else { @@ -87,7 +87,7 @@ func doHTTPRequest(ctx context.Context, url string, forceIMDSv2 bool) (string, e } } } - res, err := httputils.Get(ctx, url, headers, time.Duration(config.Datadog().GetInt("ec2_metadata_timeout"))*time.Millisecond, config.Datadog()) + res, err := httputils.Get(ctx, url, headers, time.Duration(pkgconfigsetup.Datadog().GetInt("ec2_metadata_timeout"))*time.Millisecond, pkgconfigsetup.Datadog()) // We don't want to register the source when we force imdsv2 if err == nil && !forceIMDSv2 { setCloudProviderSource(source) diff --git a/pkg/util/ec2/network.go b/pkg/util/ec2/network.go index 5fafa6bed62d7..a7fa4730513a7 100644 --- a/pkg/util/ec2/network.go +++ b/pkg/util/ec2/network.go @@ -30,9 +30,9 @@ func GetPublicIPv4(ctx context.Context) (string, error) { var networkIDFetcher = cachedfetch.Fetcher{ Name: "VPC IDs", Attempt: func(ctx context.Context) (interface{}, error) { - resp, err := getMetadataItem(ctx, imdsNetworkMacs, false) + resp, err := getMetadataItem(ctx, imdsNetworkMacs, true) if err != nil { - return "", err + return "", fmt.Errorf("EC2: GetNetworkID failed to get mac addresses: %w", err) } macs := strings.Split(strings.TrimSpace(resp), "\n") @@ -43,9 +43,9 @@ var networkIDFetcher = 
cachedfetch.Fetcher{ continue } mac = strings.TrimSuffix(mac, "/") - id, err := getMetadataItem(ctx, fmt.Sprintf("%s/%s/vpc-id", imdsNetworkMacs, mac), false) + id, err := getMetadataItem(ctx, fmt.Sprintf("%s/%s/vpc-id", imdsNetworkMacs, mac), true) if err != nil { - return "", err + return "", fmt.Errorf("EC2: GetNetworkID failed to get vpc id for mac %s: %w", mac, err) } vpcIDs.Add(id) } diff --git a/pkg/util/ec2/network_test.go b/pkg/util/ec2/network_test.go index 7fa773b41b888..577fd9cc5da75 100644 --- a/pkg/util/ec2/network_test.go +++ b/pkg/util/ec2/network_test.go @@ -15,7 +15,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestGetPublicIPv4(t *testing.T) { @@ -23,17 +23,22 @@ func TestGetPublicIPv4(t *testing.T) { ip := "10.0.0.2" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") - switch r.RequestURI { - case "/public-ipv4": - io.WriteString(w, ip) - default: - w.WriteHeader(http.StatusNotFound) + switch r.Method { + case http.MethodPut: // token request + io.WriteString(w, testIMDSToken) + case http.MethodGet: // metadata request + switch r.RequestURI { + case "/public-ipv4": + io.WriteString(w, ip) + default: + w.WriteHeader(http.StatusNotFound) + } } })) defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() val, err := GetPublicIPv4(ctx) @@ -47,19 +52,25 @@ func TestGetNetworkID(t *testing.T) { vpc := "vpc-12345" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") - switch r.RequestURI { - case "/network/interfaces/macs": - io.WriteString(w, mac+"/") - case "/network/interfaces/macs/00:00:00:00:00/vpc-id": - io.WriteString(w, vpc) - default: - w.WriteHeader(http.StatusNotFound) + switch r.Method { + case http.MethodPut: // token request + io.WriteString(w, testIMDSToken) + case http.MethodGet: // metadata request + switch r.RequestURI { + case "/network/interfaces/macs": + io.WriteString(w, mac+"/") + case "/network/interfaces/macs/00:00:00:00:00/vpc-id": + io.WriteString(w, vpc) + default: + w.WriteHeader(http.StatusNotFound) + } } })) defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + tokenURL = ts.URL + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() val, err := GetNetworkID(ctx) @@ -69,18 +80,25 @@ func TestGetNetworkID(t *testing.T) { func TestGetInstanceIDNoMac(t *testing.T) { ctx := context.Background() - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - io.WriteString(w, "") + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + switch r.Method { + case http.MethodPut: // token request + io.WriteString(w, testIMDSToken) + case http.MethodGet: // metadata request + io.WriteString(w, "") + } })) defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + tokenURL = ts.URL + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() _, err := GetNetworkID(ctx) require.Error(t, err) - 
assert.Contains(t, err.Error(), "no mac addresses returned") + assert.Contains(t, err.Error(), "EC2: GetNetworkID no mac addresses returned") } func TestGetInstanceIDMultipleVPC(t *testing.T) { @@ -91,22 +109,28 @@ func TestGetInstanceIDMultipleVPC(t *testing.T) { vpc2 := "vpc-6789" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") - switch r.RequestURI { - case "/network/interfaces/macs": - io.WriteString(w, mac+"/\n") - io.WriteString(w, mac2+"/\n") - case "/network/interfaces/macs/00:00:00:00:00/vpc-id": - io.WriteString(w, vpc) - case "/network/interfaces/macs/00:00:00:00:01/vpc-id": - io.WriteString(w, vpc2) - default: - w.WriteHeader(http.StatusNotFound) + switch r.Method { + case http.MethodPut: // token request + io.WriteString(w, testIMDSToken) + case http.MethodGet: // metadata request + switch r.RequestURI { + case "/network/interfaces/macs": + io.WriteString(w, mac+"/\n") + io.WriteString(w, mac2+"/\n") + case "/network/interfaces/macs/00:00:00:00:00/vpc-id": + io.WriteString(w, vpc) + case "/network/interfaces/macs/00:00:00:00:01/vpc-id": + io.WriteString(w, vpc2) + default: + w.WriteHeader(http.StatusNotFound) + } } })) defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + tokenURL = ts.URL + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() _, err := GetNetworkID(ctx) diff --git a/pkg/util/ecs/common/common.go b/pkg/util/ecs/common/common.go index ea0aada4925aa..db68f594aac66 100644 --- a/pkg/util/ecs/common/common.go +++ b/pkg/util/ecs/common/common.go @@ -9,7 +9,7 @@ package common import ( "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // CloudProviderName contains the inventory name of for ECS @@ -17,5 +17,5 @@ const CloudProviderName = "AWS" // MetadataTimeout defines timeout for ECS metadata endpoints func MetadataTimeout() time.Duration { - return config.Datadog().GetDuration("ecs_metadata_timeout") * time.Millisecond + return pkgconfigsetup.Datadog().GetDuration("ecs_metadata_timeout") * time.Millisecond } diff --git a/pkg/util/ecs/detection.go b/pkg/util/ecs/detection.go index 498623ca0bc36..a946f87e09a06 100644 --- a/pkg/util/ecs/detection.go +++ b/pkg/util/ecs/detection.go @@ -11,7 +11,7 @@ import ( "context" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/ecs/common" ecsmeta "github.com/DataDog/datadog-agent/pkg/util/ecs/metadata" @@ -27,7 +27,7 @@ const ( // HasEC2ResourceTags returns whether the metadata endpoint in ECS exposes // resource tags. 
func HasEC2ResourceTags() bool { - if !config.IsCloudProviderEnabled(common.CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(common.CloudProviderName, pkgconfigsetup.Datadog()) { return false } return queryCacheBool(hasEC2ResourceTagsCacheKey, func() (bool, time.Duration) { @@ -60,7 +60,7 @@ func HasFargateResourceTags(ctx context.Context) bool { } func queryCacheBool(cacheKey string, cacheMissEvalFunc func() (bool, time.Duration)) bool { - if !config.IsCloudProviderEnabled(common.CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(common.CloudProviderName, pkgconfigsetup.Datadog()) { return false } if cachedValue, found := cache.Cache.Get(cacheKey); found { diff --git a/pkg/util/ecs/metadata/clients.go b/pkg/util/ecs/metadata/clients.go index 819ca629de774..027dfe6082c52 100644 --- a/pkg/util/ecs/metadata/clients.go +++ b/pkg/util/ecs/metadata/clients.go @@ -13,7 +13,7 @@ import ( "sync" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/retry" @@ -50,7 +50,7 @@ type util struct { // endpoint, by detecting the endpoint address. Returns an error if it was not // possible to detect the endpoint address. func V1() (v1.Client, error) { - if !config.IsCloudProviderEnabled(common.CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(common.CloudProviderName, pkgconfigsetup.Datadog()) { return nil, fmt.Errorf("Cloud Provider %s is disabled by configuration", common.CloudProviderName) } @@ -73,7 +73,7 @@ func V1() (v1.Client, error) { // V2 returns a client for the ECS metadata API v2 that uses the default // endpoint address. func V2() (v2.Client, error) { - if !config.IsCloudProviderEnabled(common.CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(common.CloudProviderName, pkgconfigsetup.Datadog()) { return nil, fmt.Errorf("Cloud Provider %s is disabled by configuration", common.CloudProviderName) } @@ -99,7 +99,7 @@ func V2() (v2.Client, error) { // error if it was not possible to detect the endpoint address. // v4 metadata API is preferred over v3 if both are available. func V3orV4FromCurrentTask() (v3or4.Client, error) { - if !config.IsCloudProviderEnabled(common.CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(common.CloudProviderName, pkgconfigsetup.Datadog()) { return nil, fmt.Errorf("Cloud Provider %s is disabled by configuration", common.CloudProviderName) } @@ -123,7 +123,7 @@ func V3orV4FromCurrentTask() (v3or4.Client, error) { // the endpoint address from the task the executable is running in. Returns an // error if it was not possible to detect the endpoint address. 
func V4FromCurrentTask() (v3or4.Client, error) { - if !config.IsCloudProviderEnabled(common.CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(common.CloudProviderName, pkgconfigsetup.Datadog()) { return nil, fmt.Errorf("Cloud Provider %s is disabled by configuration", common.CloudProviderName) } diff --git a/pkg/util/ecs/metadata/clients_nodocker.go b/pkg/util/ecs/metadata/clients_nodocker.go index a73cb38fd75b0..1ce3edcd3d7a7 100644 --- a/pkg/util/ecs/metadata/clients_nodocker.go +++ b/pkg/util/ecs/metadata/clients_nodocker.go @@ -10,7 +10,7 @@ package metadata import ( "fmt" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/ecs/common" @@ -29,7 +29,7 @@ func V1() (*v1.Client, error) { // V2 returns a client for the ECS metadata API v2 that uses the default // endpoint address. func V2() (*v2.Client, error) { - if !config.IsCloudProviderEnabled(common.CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(common.CloudProviderName, pkgconfigsetup.Datadog()) { return nil, fmt.Errorf("cloud Provider %s is disabled by configuration", common.CloudProviderName) } diff --git a/pkg/util/ecs/metadata/detection.go b/pkg/util/ecs/metadata/detection.go index 58d5814d1c783..c3ac6b96c06ba 100644 --- a/pkg/util/ecs/metadata/detection.go +++ b/pkg/util/ecs/metadata/detection.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/system" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" v1 "github.com/DataDog/datadog-agent/pkg/util/ecs/metadata/v1" @@ -41,8 +41,8 @@ const ( func detectAgentV1URL() (string, error) { urls := make([]string, 0, 3) - if len(config.Datadog().GetString("ecs_agent_url")) > 0 { - urls = append(urls, config.Datadog().GetString("ecs_agent_url")) + if len(pkgconfigsetup.Datadog().GetString("ecs_agent_url")) > 0 { + urls = append(urls, pkgconfigsetup.Datadog().GetString("ecs_agent_url")) } if env.IsContainerized() { @@ -54,7 +54,7 @@ func detectAgentV1URL() (string, error) { urls = append(urls, agentURLS...) 
} // Try the default gateway - gw, err := system.GetDefaultGateway(config.Datadog().GetString("proc_root")) + gw, err := system.GetDefaultGateway(pkgconfigsetup.Datadog().GetString("proc_root")) if err != nil { log.Debugf("Could not get docker default gateway: %s", err) } @@ -88,7 +88,7 @@ func getAgentV1ContainerURLs(ctx context.Context) ([]string, error) { if err != nil { return nil, err } - ecsConfig, err := du.Inspect(ctx, config.Datadog().GetString("ecs_agent_container_name"), false) + ecsConfig, err := du.Inspect(ctx, pkgconfigsetup.Datadog().GetString("ecs_agent_container_name"), false) if err != nil { return nil, err } diff --git a/pkg/util/ecs/metadata/detection_test.go b/pkg/util/ecs/metadata/detection_test.go index a88a5b815e9dd..69e48d9a8f30b 100644 --- a/pkg/util/ecs/metadata/detection_test.go +++ b/pkg/util/ecs/metadata/detection_test.go @@ -18,8 +18,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/ecs/metadata/testutil" @@ -36,7 +36,7 @@ func TestLocateECSHTTP(t *testing.T) { ts := ecsinterface.Start() defer ts.Close() - config.Datadog().SetDefault("ecs_agent_url", ts.URL) + pkgconfigsetup.Datadog().SetDefault("ecs_agent_url", ts.URL) _, err = newAutodetectedClientV1() require.NoError(t, err) @@ -59,7 +59,7 @@ func TestLocateECSHTTPFail(t *testing.T) { ts := ecsinterface.Start() defer ts.Close() - config.Datadog().SetDefault("ecs_agent_url", ts.URL) + pkgconfigsetup.Datadog().SetDefault("ecs_agent_url", ts.URL) _, err = newAutodetectedClientV1() require.Error(t, err) @@ -74,11 +74,11 @@ func TestLocateECSHTTPFail(t *testing.T) { } func TestGetAgentV1ContainerURLs(t *testing.T) { - config.SetFeatures(t, env.Docker) + env.SetFeatures(t, env.Docker) ctx := context.Background() - config.Datadog().SetDefault("ecs_agent_container_name", "ecs-agent-custom") - defer config.Datadog().SetDefault("ecs_agent_container_name", "ecs-agent") + pkgconfigsetup.Datadog().SetDefault("ecs_agent_container_name", "ecs-agent-custom") + defer pkgconfigsetup.Datadog().SetDefault("ecs_agent_container_name", "ecs-agent") // Setting mocked data in cache nets := make(map[string]*network.EndpointSettings) diff --git a/pkg/util/fargate/detection.go b/pkg/util/fargate/detection.go index 20a7d80aa0c12..56dd0e0e05267 100644 --- a/pkg/util/fargate/detection.go +++ b/pkg/util/fargate/detection.go @@ -8,8 +8,8 @@ package fargate import ( "errors" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // IsFargateInstance returns whether the Agent is running in Fargate. 
@@ -30,7 +30,7 @@ func GetOrchestrator() OrchestratorName { // GetEKSFargateNodename returns the node name in EKS Fargate func GetEKSFargateNodename() (string, error) { - if nodename := config.Datadog().GetString("kubernetes_kubelet_nodename"); nodename != "" { + if nodename := pkgconfigsetup.Datadog().GetString("kubernetes_kubelet_nodename"); nodename != "" { return nodename, nil } return "", errors.New("kubernetes_kubelet_nodename is not defined, make sure DD_KUBERNETES_KUBELET_NODENAME is set via the downward API") diff --git a/pkg/util/flavor/go.mod b/pkg/util/flavor/go.mod index 2e595d37fa9ca..71a89144c735e 100644 --- a/pkg/util/flavor/go.mod +++ b/pkg/util/flavor/go.mod @@ -12,7 +12,9 @@ replace ( github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../collector/check/defaults github.com/DataDog/datadog-agent/pkg/config/env => ../../config/env github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/telemetry => ../../telemetry github.com/DataDog/datadog-agent/pkg/util/executable => ../executable github.com/DataDog/datadog-agent/pkg/util/filesystem => ../filesystem @@ -38,6 +40,8 @@ require ( github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect @@ -66,7 +70,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -74,12 +78,12 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/util/flavor/go.sum b/pkg/util/flavor/go.sum index 77ba213060c82..765bdc23a7bf4 100644 --- a/pkg/util/flavor/go.sum +++ b/pkg/util/flavor/go.sum @@ -180,8 +180,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod 
h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -251,15 +252,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -295,8 +296,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -312,8 +313,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/pkg/util/hostname/common.go b/pkg/util/hostname/common.go index 968316db8adcb..059076775949c 100644 --- a/pkg/util/hostname/common.go +++ b/pkg/util/hostname/common.go @@ -13,8 +13,8 @@ import ( "strings" "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/azure" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/gce" "github.com/DataDog/datadog-agent/pkg/util/ec2" @@ -39,7 +39,7 @@ var ( type Data = hostnameinterface.Data func fromConfig(ctx context.Context, _ string) (string, error) { - configName := config.Datadog().GetString("hostname") + configName := pkgconfigsetup.Datadog().GetString("hostname") err := validate.ValidHostname(configName) if err != nil { return "", err @@ -50,7 +50,7 @@ func fromConfig(ctx context.Context, _ string) (string, error) { func fromHostnameFile(ctx context.Context, _ string) (string, error) { // Try `hostname_file` config option next - hostnameFilepath := config.Datadog().GetString("hostname_file") + hostnameFilepath := pkgconfigsetup.Datadog().GetString("hostname_file") if hostnameFilepath == "" { return "", fmt.Errorf("'hostname_file' configuration is not enabled") } @@ -91,7 +91,7 @@ func fromFQDN(ctx context.Context, _ string) (string, error) { return "", fmt.Errorf("FQDN hostname is not usable") } - if config.Datadog().GetBool("hostname_fqdn") { + if pkgconfigsetup.Datadog().GetBool("hostname_fqdn") { fqdn, err := fqdnHostname() if err == nil { return fqdn, nil @@ -127,7 +127,7 @@ func fromEC2(ctx context.Context, currentHostname string) (string, error) { // We use the instance id if we're on an ECS cluster or we're on EC2 // and the hostname is one of the default ones - prioritizeEC2Hostname := config.Datadog().GetBool("ec2_prioritize_instance_id_as_hostname") + prioritizeEC2Hostname := pkgconfigsetup.Datadog().GetBool("ec2_prioritize_instance_id_as_hostname") log.Debugf("Detected a default EC2 hostname: %v", ec2.IsDefaultHostname(currentHostname)) log.Debugf("ec2_prioritize_instance_id_as_hostname is set to %v", prioritizeEC2Hostname) diff --git 
a/pkg/util/hostname/common_test.go b/pkg/util/hostname/common_test.go index 8788ea698c393..920a2bf850cf1 100644 --- a/pkg/util/hostname/common_test.go +++ b/pkg/util/hostname/common_test.go @@ -14,8 +14,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/ec2" "github.com/DataDog/datadog-agent/pkg/util/fargate" ) @@ -24,7 +24,7 @@ import ( func TestFromConfig(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("hostname", "test-hostname") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "test-hostname") hostname, err := fromConfig(context.TODO(), "") require.NoError(t, err) @@ -33,7 +33,7 @@ func TestFromConfig(t *testing.T) { func TestFromConfigInvalid(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("hostname", "hostname_with_underscore") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "hostname_with_underscore") _, err := fromConfig(context.TODO(), "") assert.Error(t, err) @@ -50,7 +50,7 @@ func setupHostnameFile(t *testing.T, content string) { require.NoError(t, err, "Could not write to tmp file %s: %s", destFile.Name(), err) configmock.New(t) - config.Datadog().SetWithoutSource("hostname_file", destFile.Name()) + pkgconfigsetup.Datadog().SetWithoutSource("hostname_file", destFile.Name()) destFile.Close() } @@ -73,7 +73,7 @@ func TestFromHostnameFileWhitespaceTrim(t *testing.T) { func TestFromHostnameFileNoFileName(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("hostname_file", "") + pkgconfigsetup.Datadog().SetWithoutSource("hostname_file", "") _, err := fromHostnameFile(context.TODO(), "") assert.NotNil(t, err) @@ -113,12 +113,12 @@ func TestFromFQDN(t *testing.T) { fqdnHostname = func() (string, error) { return "fqdn-hostname", nil } configmock.New(t) - config.Datadog().SetWithoutSource("hostname_fqdn", false) + pkgconfigsetup.Datadog().SetWithoutSource("hostname_fqdn", false) _, err := fromFQDN(context.TODO(), "") assert.Error(t, err) - config.Datadog().SetWithoutSource("hostname_fqdn", true) + pkgconfigsetup.Datadog().SetWithoutSource("hostname_fqdn", true) hostname, err := fromFQDN(context.TODO(), "") assert.NoError(t, err) @@ -167,7 +167,7 @@ func TestFromEc2Prioritize(t *testing.T) { // to true we use the instance ID defer func() { ec2GetInstanceID = ec2.GetInstanceID }() configmock.New(t) - config.Datadog().SetWithoutSource("ec2_prioritize_instance_id_as_hostname", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_prioritize_instance_id_as_hostname", true) // make AWS provider return an error ec2GetInstanceID = func(context.Context) (string, error) { return "", fmt.Errorf("some error") } diff --git a/pkg/util/hostname/os_hostname_linux.go b/pkg/util/hostname/os_hostname_linux.go index 793c747eb7cdf..afb9075a46a24 100644 --- a/pkg/util/hostname/os_hostname_linux.go +++ b/pkg/util/hostname/os_hostname_linux.go @@ -10,7 +10,7 @@ package hostname import ( "context" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/system" ) @@ -19,7 +19,7 @@ import ( // in a non-root UTS namespace because in that case, the OS hostname characterizes the // identity of the agent container and not the one of the 
nodes it is running on. func isOSHostnameUsable(_ context.Context) bool { - if config.Datadog().GetBool("hostname_trust_uts_namespace") { + if pkgconfigsetup.Datadog().GetBool("hostname_trust_uts_namespace") { return true } diff --git a/pkg/util/hostname/providers_test.go b/pkg/util/hostname/providers_test.go index e95605acb92ab..9060f9678a6d7 100644 --- a/pkg/util/hostname/providers_test.go +++ b/pkg/util/hostname/providers_test.go @@ -15,9 +15,9 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/azure" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/gce" @@ -63,7 +63,7 @@ func setupHostnameTest(t *testing.T, tc testCase) { configmock.New(t) if tc.configHostname { - config.Datadog().SetWithoutSource("hostname", "hostname-from-configuration") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "hostname-from-configuration") } if tc.hostnameFile { setupHostnameFile(t, "hostname-from-file") @@ -89,7 +89,7 @@ func setupHostnameTest(t *testing.T, tc testCase) { if tc.FQDN || tc.FQDNEC2 { // making isOSHostnameUsable return true osHostnameUsable = func(context.Context) bool { return true } - config.Datadog().SetWithoutSource("hostname_fqdn", true) + pkgconfigsetup.Datadog().SetWithoutSource("hostname_fqdn", true) if !tc.FQDNEC2 { fqdnHostname = func() (string, error) { return "hostname-from-fqdn", nil } } else { @@ -118,7 +118,7 @@ func setupHostnameTest(t *testing.T, tc testCase) { } if tc.EC2Proritized { - config.Datadog().SetWithoutSource("ec2_prioritize_instance_id_as_hostname", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_prioritize_instance_id_as_hostname", true) } } diff --git a/pkg/util/hostname/warnings.go b/pkg/util/hostname/warnings.go index 5efafbc831a61..d578d4d7b48c6 100644 --- a/pkg/util/hostname/warnings.go +++ b/pkg/util/hostname/warnings.go @@ -10,7 +10,7 @@ import ( "os" "runtime" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/ec2" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -26,7 +26,7 @@ func isHostnameCanonicalForIntake(ctx context.Context, hostname string) bool { } func warnIfNotCanonicalHostname(ctx context.Context, hostname string) { - if !isHostnameCanonicalForIntake(ctx, hostname) && !config.Datadog().GetBool("hostname_force_config_as_canonical") { + if !isHostnameCanonicalForIntake(ctx, hostname) && !pkgconfigsetup.Datadog().GetBool("hostname_force_config_as_canonical") { log.Warnf( "Hostname '%s' defined in configuration will not be used as the in-app hostname. 
"+ "For more information: https://dtdg.co/agent-hostname-force-config-as-canonical", @@ -49,7 +49,7 @@ func warnAboutFQDN(ctx context.Context, hostname string) { // We have a FQDN that does not match to the resolved hostname, and the configuration // field `hostname_fqdn` isn't set -> we display a warning message about // the future behavior - if !config.Datadog().GetBool("hostname_fqdn") && hostname == h && h != fqdn { + if !pkgconfigsetup.Datadog().GetBool("hostname_fqdn") && hostname == h && h != fqdn { if runtime.GOOS != "windows" { // REMOVEME: This should be removed when the default `hostname_fqdn` is set to true log.Warnf("DEPRECATION NOTICE: The agent resolved your hostname as '%s'. However in a future version, it will be resolved as '%s' by default. To enable the future behavior, please enable the `hostname_fqdn` flag in the configuration. For more information: https://dtdg.co/flag-hostname-fqdn", h, fqdn) diff --git a/pkg/util/installinfo/install_info.go b/pkg/util/installinfo/install_info.go index 0486c1324493e..e7443aff6c8d5 100644 --- a/pkg/util/installinfo/install_info.go +++ b/pkg/util/installinfo/install_info.go @@ -18,7 +18,8 @@ import ( "gopkg.in/yaml.v2" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/version" @@ -50,12 +51,12 @@ const maxVersionHistoryEntries = 60 // GetFilePath returns the path of the 'install_info' directory relative to the loaded coinfiguration file. The // 'install_info' directory contains information about how the agent was installed. -func GetFilePath(conf config.Reader) string { +func GetFilePath(conf model.Reader) string { return filepath.Join(configUtils.ConfFileDirectory(conf), "install_info") } // Get returns information about how the Agent was installed. -func Get(conf config.Reader) (*InstallInfo, error) { +func Get(conf model.Reader) (*InstallInfo, error) { return getFromPath(GetFilePath(conf)) } @@ -77,8 +78,8 @@ func getFromPath(path string) (*InstallInfo, error) { // LogVersionHistory loads version history file, append new entry if agent version is different than the last entry in the // JSON file, trim the file if too many entries then save the file. 
func LogVersionHistory() { - versionHistoryFilePath := filepath.Join(config.Datadog().GetString("run_path"), "version-history.json") - installInfoFilePath := GetFilePath(config.Datadog()) + versionHistoryFilePath := filepath.Join(pkgconfigsetup.Datadog().GetString("run_path"), "version-history.json") + installInfoFilePath := GetFilePath(pkgconfigsetup.Datadog()) logVersionHistoryToFile(versionHistoryFilePath, installInfoFilePath, version.AgentVersion, time.Now().UTC()) } diff --git a/pkg/util/kernel/find_headers.go b/pkg/util/kernel/find_headers.go index 8aa9888477528..245e582bad16b 100644 --- a/pkg/util/kernel/find_headers.go +++ b/pkg/util/kernel/find_headers.go @@ -22,7 +22,6 @@ import ( "strings" "sync" - model "github.com/DataDog/agent-payload/v5/process" "github.com/DataDog/datadog-go/v5/statsd" "github.com/DataDog/nikos/types" "golang.org/x/exp/maps" @@ -42,6 +41,22 @@ var versionCodeRegexp = regexp.MustCompile(`^#define[\t ]+LINUX_VERSION_CODE[\t var errReposDirInaccessible = errors.New("unable to access repos directory") +// Copied from https://github.com/DataDog/agent-payload/blob/master/process/connections.pb.go +// to avoid CGO dependency +var kernelHeaderFetchResultName = map[int]string{ + 0: "FetchNotAttempted", + 1: "CustomHeadersFound", + 2: "DefaultHeadersFound", + 3: "SysfsHeadersFound", + 4: "DownloadedHeadersFound", + 5: "DownloadSuccess", + 6: "HostVersionErr", + 7: "DownloadFailure", + 8: "ValidationFailure", + 9: "ReposDirAccessFailure", + 10: "HeadersNotFoundDownloadDisabled", +} + type headerFetchResult int const ( @@ -461,7 +476,7 @@ func submitTelemetry(result headerFetchResult, client statsd.ClientInterface) { khdTags := append(tags, fmt.Sprintf("result:%s", resultTag), - fmt.Sprintf("reason:%s", model.KernelHeaderFetchResult(result).String()), + fmt.Sprintf("reason:%s", kernelHeaderFetchResultName[int(result)]), ) if err := client.Count("datadog.system_probe.kernel_header_fetch.attempted", 1.0, khdTags, 1); err != nil && !errors.Is(err, statsd.ErrNoClient) { diff --git a/pkg/util/kubelet/hostname_test.go b/pkg/util/kubelet/hostname_test.go index e29c1d0f61d41..eb130f43e8e06 100644 --- a/pkg/util/kubelet/hostname_test.go +++ b/pkg/util/kubelet/hostname_test.go @@ -15,7 +15,6 @@ import ( "github.com/stretchr/testify/mock" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" @@ -33,7 +32,7 @@ func (m *kubeUtilMock) GetNodename(_ context.Context) (string, error) { } func TestHostnameProvider(t *testing.T) { - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) ctx := context.Background() mockConfig := configmock.New(t) @@ -66,7 +65,7 @@ func TestHostnameProvider(t *testing.T) { } func TestHostnameProviderInvalid(t *testing.T) { - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) ctx := context.Background() mockConfig := configmock.New(t) diff --git a/pkg/util/kubernetes/apiserver/apiserver.go b/pkg/util/kubernetes/apiserver/apiserver.go index 8a0668a76d4ff..a760626982de1 100644 --- a/pkg/util/kubernetes/apiserver/apiserver.go +++ b/pkg/util/kubernetes/apiserver/apiserver.go @@ -41,7 +41,7 @@ import ( apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1" apiv1 "github.com/DataDog/datadog-agent/pkg/clusteragent/api/v1" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -147,9 +147,9 @@ type APIClient struct { func initAPIClient() { globalAPIClient = &APIClient{ - defaultClientTimeout: time.Duration(config.Datadog().GetInt64("kubernetes_apiserver_client_timeout")) * time.Second, - defaultInformerTimeout: time.Duration(config.Datadog().GetInt64("kubernetes_apiserver_informer_client_timeout")) * time.Second, - defaultInformerResyncPeriod: time.Duration(config.Datadog().GetInt64("kubernetes_informers_resync_period")) * time.Second, + defaultClientTimeout: time.Duration(pkgconfigsetup.Datadog().GetInt64("kubernetes_apiserver_client_timeout")) * time.Second, + defaultInformerTimeout: time.Duration(pkgconfigsetup.Datadog().GetInt64("kubernetes_apiserver_informer_client_timeout")) * time.Second, + defaultInformerResyncPeriod: time.Duration(pkgconfigsetup.Datadog().GetInt64("kubernetes_informers_resync_period")) * time.Second, } globalAPIClient.initRetry.SetupRetrier(&retry.Config{ //nolint:errcheck Name: "apiserver", @@ -200,15 +200,15 @@ func WaitForAPIClient(ctx context.Context) (*APIClient, error) { func getClientConfig(timeout time.Duration) (*rest.Config, error) { var clientConfig *rest.Config var err error - cfgPath := config.Datadog().GetString("kubernetes_kubeconfig_path") + cfgPath := pkgconfigsetup.Datadog().GetString("kubernetes_kubeconfig_path") if cfgPath == "" { clientConfig, err = rest.InClusterConfig() - if !config.Datadog().GetBool("kubernetes_apiserver_tls_verify") { + if !pkgconfigsetup.Datadog().GetBool("kubernetes_apiserver_tls_verify") { clientConfig.TLSClientConfig.Insecure = true } - if customCAPath := config.Datadog().GetString("kubernetes_apiserver_ca_path"); customCAPath != "" { + if customCAPath := pkgconfigsetup.Datadog().GetString("kubernetes_apiserver_ca_path"); customCAPath != "" { clientConfig.TLSClientConfig.CAFile = customCAPath } @@ -225,7 +225,7 @@ func getClientConfig(timeout time.Duration) (*rest.Config, error) { } } - if config.Datadog().GetBool("kubernetes_apiserver_use_protobuf") { + if pkgconfigsetup.Datadog().GetBool("kubernetes_apiserver_use_protobuf") { clientConfig.ContentType = "application/vnd.kubernetes.protobuf" } @@ -367,20 +367,20 @@ func (c *APIClient) connect() error { // Creating informers c.InformerFactory = c.GetInformerWithOptions(nil) - if config.Datadog().GetBool("admission_controller.enabled") || - config.Datadog().GetBool("compliance_config.enabled") || - config.Datadog().GetBool("orchestrator_explorer.enabled") || - config.Datadog().GetBool("external_metrics_provider.use_datadogmetric_crd") || - config.Datadog().GetBool("external_metrics_provider.wpa_controller") || - config.Datadog().GetBool("cluster_checks.enabled") || - config.Datadog().GetBool("autoscaling.workload.enabled") { + if pkgconfigsetup.Datadog().GetBool("admission_controller.enabled") || + pkgconfigsetup.Datadog().GetBool("compliance_config.enabled") || + pkgconfigsetup.Datadog().GetBool("orchestrator_explorer.enabled") || + pkgconfigsetup.Datadog().GetBool("external_metrics_provider.use_datadogmetric_crd") || + pkgconfigsetup.Datadog().GetBool("external_metrics_provider.wpa_controller") || + pkgconfigsetup.Datadog().GetBool("cluster_checks.enabled") || + pkgconfigsetup.Datadog().GetBool("autoscaling.workload.enabled") { c.DynamicInformerFactory = 
dynamicinformer.NewDynamicSharedInformerFactory(c.DynamicInformerCl, c.defaultInformerResyncPeriod) } - if config.Datadog().GetBool("admission_controller.enabled") { + if pkgconfigsetup.Datadog().GetBool("admission_controller.enabled") { nameFieldkey := "metadata.name" optionsForService := func(options *metav1.ListOptions) { - options.FieldSelector = fields.OneTermEqualSelector(nameFieldkey, config.Datadog().GetString("admission_controller.certificate.secret_name")).String() + options.FieldSelector = fields.OneTermEqualSelector(nameFieldkey, pkgconfigsetup.Datadog().GetString("admission_controller.certificate.secret_name")).String() } c.CertificateSecretInformerFactory = c.GetInformerWithOptions( nil, @@ -389,7 +389,7 @@ func (c *APIClient) connect() error { ) optionsForWebhook := func(options *metav1.ListOptions) { - options.FieldSelector = fields.OneTermEqualSelector(nameFieldkey, config.Datadog().GetString("admission_controller.webhook_name")).String() + options.FieldSelector = fields.OneTermEqualSelector(nameFieldkey, pkgconfigsetup.Datadog().GetString("admission_controller.webhook_name")).String() } c.WebhookConfigInformerFactory = c.GetInformerWithOptions( nil, @@ -417,7 +417,7 @@ type MetadataMapperBundle struct { func NewMetadataMapperBundle() *MetadataMapperBundle { return &MetadataMapperBundle{ Services: apiv1.NewNamespacesPodsStringsSet(), - mapOnIP: config.Datadog().GetBool("kubernetes_map_services_on_ip"), + mapOnIP: pkgconfigsetup.Datadog().GetBool("kubernetes_map_services_on_ip"), } } @@ -449,7 +449,7 @@ func (c *APIClient) GetTokenFromConfigmap(token string) (string, time.Time, erro namespace := common.GetResourcesNamespace() nowTs := time.Now() - configMapDCAToken := config.Datadog().GetString("cluster_agent.token_name") + configMapDCAToken := pkgconfigsetup.Datadog().GetString("cluster_agent.token_name") cmEvent, err := c.getOrCreateConfigMap(configMapDCAToken, namespace) if err != nil { // we do not process event if we can't interact with the CM. @@ -488,7 +488,7 @@ func (c *APIClient) GetTokenFromConfigmap(token string) (string, time.Time, erro // sets its collected timestamp in the ConfigMap `configmaptokendca` func (c *APIClient) UpdateTokenInConfigmap(token, tokenValue string, timestamp time.Time) error { namespace := common.GetResourcesNamespace() - configMapDCAToken := config.Datadog().GetString("cluster_agent.token_name") + configMapDCAToken := pkgconfigsetup.Datadog().GetString("cluster_agent.token_name") tokenConfigMap, err := c.getOrCreateConfigMap(configMapDCAToken, namespace) if err != nil { return err diff --git a/pkg/util/kubernetes/apiserver/common/common.go b/pkg/util/kubernetes/apiserver/common/common.go index e9999f8f97bdd..d2acbcc303c0e 100644 --- a/pkg/util/kubernetes/apiserver/common/common.go +++ b/pkg/util/kubernetes/apiserver/common/common.go @@ -18,7 +18,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/config/setup/constants" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -29,7 +30,7 @@ const ( // GetResourcesNamespace is used to fetch the namespace of the resources used by the Kubernetes check (e.g. Leader Election, Event collection). 
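The admission-controller hunk above scopes the certificate-secret and webhook informers to a single named object by building a field selector on `metadata.name`. A runnable sketch of that list-options pattern; `webhook-certificate` is a placeholder value, whereas the real code reads the name from `admission_controller.certificate.secret_name` and `admission_controller.webhook_name`.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	// Tweak function in the same shape as optionsForService/optionsForWebhook:
	// restrict List/Watch calls to one object instead of the whole namespace.
	optionsForService := func(options *metav1.ListOptions) {
		options.FieldSelector = fields.OneTermEqualSelector("metadata.name", "webhook-certificate").String()
	}

	var opts metav1.ListOptions
	optionsForService(&opts)
	fmt.Println(opts.FieldSelector) // metadata.name=webhook-certificate
}
```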
func GetResourcesNamespace() string { - namespace := config.Datadog().GetString("kube_resources_namespace") + namespace := pkgconfigsetup.Datadog().GetString("kube_resources_namespace") if namespace != "" { return namespace } @@ -63,7 +64,7 @@ func GetKubeSystemUID(coreClient corev1.CoreV1Interface) (string, error) { // It first checks if the CM exists, in which case it uses the ID it contains // It thus requires get, create, and update perms on configmaps in the cluster-agent's namespace func GetOrCreateClusterID(coreClient corev1.CoreV1Interface) (string, error) { - cacheClusterIDKey := cache.BuildAgentKey(config.ClusterIDCacheKey) + cacheClusterIDKey := cache.BuildAgentKey(constants.ClusterIDCacheKey) x, found := cache.Cache.Get(cacheClusterIDKey) if found { return x.(string), nil diff --git a/pkg/util/kubernetes/apiserver/controllers/controller_util.go b/pkg/util/kubernetes/apiserver/controllers/controller_util.go index ab277b7f040fb..adca9baaa6915 100644 --- a/pkg/util/kubernetes/apiserver/controllers/controller_util.go +++ b/pkg/util/kubernetes/apiserver/controllers/controller_util.go @@ -22,7 +22,7 @@ import ( datadogclient "github.com/DataDog/datadog-agent/comp/autoscaling/datadogclient/def" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/custommetrics" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/autoscalers" @@ -44,8 +44,8 @@ func newAutoscalersController(client kubernetes.Interface, h.toStore.data = make(map[string]custommetrics.ExternalMetricValue) - gcPeriodSeconds := config.Datadog().GetInt("hpa_watcher_gc_period") - refreshPeriod := config.Datadog().GetInt("external_metrics_provider.refresh_period") + gcPeriodSeconds := pkgconfigsetup.Datadog().GetInt("hpa_watcher_gc_period") + refreshPeriod := pkgconfigsetup.Datadog().GetInt("external_metrics_provider.refresh_period") if gcPeriodSeconds <= 0 || refreshPeriod <= 0 { return nil, fmt.Errorf("tickers must be strictly positive in the autoscalersController"+ diff --git a/pkg/util/kubernetes/apiserver/controllers/controllers.go b/pkg/util/kubernetes/apiserver/controllers/controllers.go index 0b514a7515846..174e793165540 100644 --- a/pkg/util/kubernetes/apiserver/controllers/controllers.go +++ b/pkg/util/kubernetes/apiserver/controllers/controllers.go @@ -24,7 +24,7 @@ import ( datadogclient "github.com/DataDog/datadog-agent/comp/autoscaling/datadogclient/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -43,21 +43,21 @@ type controllerFuncs struct { var controllerCatalog = map[controllerName]controllerFuncs{ metadataControllerName: { - func() bool { return config.Datadog().GetBool("kubernetes_collect_metadata_tags") }, + func() bool { return pkgconfigsetup.Datadog().GetBool("kubernetes_collect_metadata_tags") }, startMetadataController, }, autoscalersControllerName: { func() bool { - return config.Datadog().GetBool("external_metrics_provider.enabled") && !config.Datadog().GetBool("external_metrics_provider.use_datadogmetric_crd") + return 
pkgconfigsetup.Datadog().GetBool("external_metrics_provider.enabled") && !pkgconfigsetup.Datadog().GetBool("external_metrics_provider.use_datadogmetric_crd") }, startAutoscalersController, }, servicesControllerName: { - func() bool { return config.Datadog().GetBool("cluster_checks.enabled") }, + func() bool { return pkgconfigsetup.Datadog().GetBool("cluster_checks.enabled") }, registerServicesInformer, }, endpointsControllerName: { - func() bool { return config.Datadog().GetBool("cluster_checks.enabled") }, + func() bool { return pkgconfigsetup.Datadog().GetBool("cluster_checks.enabled") }, registerEndpointsInformer, }, } @@ -156,7 +156,7 @@ func startAutoscalersController(ctx *ControllerContext, c chan error) { return } - if config.Datadog().GetBool("external_metrics_provider.wpa_controller") { + if pkgconfigsetup.Datadog().GetBool("external_metrics_provider.wpa_controller") { go autoscalersController.runWPA(ctx.StopCh, ctx.DynamicClient, ctx.DynamicInformerFactory) } diff --git a/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go index 37396c33c0e28..45ea62c6e2d26 100644 --- a/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go +++ b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go @@ -16,9 +16,18 @@ import ( "sync" "time" + "golang.org/x/mod/semver" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/discovery" + coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/leaderelection" + rl "k8s.io/client-go/tools/leaderelection/resourcelock" + "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" configmaplock "github.com/DataDog/datadog-agent/internal/third_party/client-go/tools/leaderelection/resourcelock" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" @@ -26,14 +35,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/retry" - "golang.org/x/mod/semver" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/discovery" - coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" - corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/tools/leaderelection" - rl "k8s.io/client-go/tools/leaderelection/resourcelock" ) const ( @@ -75,9 +76,9 @@ type LeaderEngine struct { func newLeaderEngine(ctx context.Context) *LeaderEngine { return &LeaderEngine{ ctx: ctx, - LeaseName: config.Datadog().GetString("leader_lease_name"), + LeaseName: pkgconfigsetup.Datadog().GetString("leader_lease_name"), LeaderNamespace: common.GetResourcesNamespace(), - ServiceName: config.Datadog().GetString("cluster_agent.kubernetes_service_name"), + ServiceName: pkgconfigsetup.Datadog().GetString("cluster_agent.kubernetes_service_name"), leaderMetric: metrics.NewLeaderMetric(), subscribers: []chan struct{}{}, LeaseDuration: defaultLeaderLeaseDuration, @@ -139,7 +140,7 @@ func (le *LeaderEngine) init() error { } log.Debugf("Init LeaderEngine with HolderIdentity: %q", le.HolderIdentity) - leaseDuration := 
config.Datadog().GetInt("leader_lease_duration") + leaseDuration := pkgconfigsetup.Datadog().GetInt("leader_lease_duration") if leaseDuration > 0 { le.LeaseDuration = time.Duration(leaseDuration) * time.Second } else { @@ -307,7 +308,7 @@ func detectLeases(client discovery.DiscoveryInterface) (bool, error) { // CanUseLeases returns if leases can be used for leader election. If the resource is defined in the config // It uses it. Otherwise it uses the discovery client for leader election. func CanUseLeases(client discovery.DiscoveryInterface) (bool, error) { - resourceType := config.Datadog().GetString("leader_election_default_resource") + resourceType := pkgconfigsetup.Datadog().GetString("leader_election_default_resource") if resourceType == "lease" || resourceType == "leases" { return true, nil } else if resourceType == "configmap" || resourceType == "configmaps" { @@ -323,7 +324,7 @@ func CanUseLeases(client discovery.DiscoveryInterface) (bool, error) { func getLeaseLeaderElectionRecord(client coordinationv1.CoordinationV1Interface) (rl.LeaderElectionRecord, error) { var empty rl.LeaderElectionRecord - lease, err := client.Leases(common.GetResourcesNamespace()).Get(context.TODO(), config.Datadog().GetString("leader_lease_name"), metav1.GetOptions{}) + lease, err := client.Leases(common.GetResourcesNamespace()).Get(context.TODO(), pkgconfigsetup.Datadog().GetString("leader_lease_name"), metav1.GetOptions{}) if err != nil { return empty, err } @@ -334,7 +335,7 @@ func getLeaseLeaderElectionRecord(client coordinationv1.CoordinationV1Interface) func getConfigMapLeaderElectionRecord(client corev1.CoreV1Interface) (rl.LeaderElectionRecord, error) { var led rl.LeaderElectionRecord - leaderElectionCM, err := client.ConfigMaps(common.GetResourcesNamespace()).Get(context.TODO(), config.Datadog().GetString("leader_lease_name"), metav1.GetOptions{}) + leaderElectionCM, err := client.ConfigMaps(common.GetResourcesNamespace()).Get(context.TODO(), pkgconfigsetup.Datadog().GetString("leader_lease_name"), metav1.GetOptions{}) if err != nil { return led, err } diff --git a/pkg/util/kubernetes/apiserver/leaderelection/leaderelection_engine.go b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection_engine.go index 66e19775eecb9..d134c95f17eb2 100644 --- a/pkg/util/kubernetes/apiserver/leaderelection/leaderelection_engine.go +++ b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection_engine.go @@ -24,7 +24,7 @@ import ( "k8s.io/client-go/tools/record" configmaplock "github.com/DataDog/datadog-agent/internal/third_party/client-go/tools/leaderelection/resourcelock" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -192,7 +192,7 @@ func (le *LeaderEngine) newElection() (*ld.LeaderElector, error) { electionConfig := ld.LeaderElectionConfig{ // ReleaseOnCancel updates the leader election lock when the main context is canceled by setting the Lease Duration to 1s. // It allows the next DCA to initialize faster. However, it performs a network call on shutdown. 
- ReleaseOnCancel: config.Datadog().GetBool("leader_election_release_on_shutdown"), + ReleaseOnCancel: pkgconfigsetup.Datadog().GetBool("leader_election_release_on_shutdown"), Lock: leaderElectorInterface, LeaseDuration: le.LeaseDuration, RenewDeadline: le.LeaseDuration / 2, diff --git a/pkg/util/kubernetes/apiserver/util.go b/pkg/util/kubernetes/apiserver/util.go index 8ed06c75c4d07..1790de6682038 100644 --- a/pkg/util/kubernetes/apiserver/util.go +++ b/pkg/util/kubernetes/apiserver/util.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/watermarkpodautoscaler/api/v1alpha1" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -28,7 +28,7 @@ import ( // An extra timeout duration can be provided depending on the informer func SyncInformers(informers map[InformerName]cache.SharedInformer, extraWait time.Duration) error { var g errgroup.Group - timeoutConfig := config.Datadog().GetDuration("kube_cache_sync_timeout_seconds") * time.Second + timeoutConfig := pkgconfigsetup.Datadog().GetDuration("kube_cache_sync_timeout_seconds") * time.Second // syncTimeout can be used to wait for the kubernetes client-go cache to sync. // It cannot be retrieved at the package-level due to the package being imported before configs are loaded. syncTimeout := timeoutConfig + extraWait @@ -60,7 +60,7 @@ type syncInformerResult struct { func SyncInformersReturnErrors(informers map[InformerName]cache.SharedInformer, extraWait time.Duration) map[InformerName]error { resultChan := make(chan syncInformerResult) errors := make(map[InformerName]error, len(informers)) - timeoutConfig := config.Datadog().GetDuration("kube_cache_sync_timeout_seconds") * time.Second + timeoutConfig := pkgconfigsetup.Datadog().GetDuration("kube_cache_sync_timeout_seconds") * time.Second // syncTimeout can be used to wait for the kubernetes client-go cache to sync. // It cannot be retrieved at the package-level due to the package being imported before configs are loaded. 
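The SyncInformers hunks above likewise only change where the timeout is read from (`kube_cache_sync_timeout_seconds` plus an extra wait), but the surrounding pattern is a useful one: each informer is awaited in its own goroutine and the first failure wins. This is a simplified standalone sketch, not the agent's implementation; the timeout is passed as a parameter instead of being read from configuration.

```go
package example

import (
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
	"k8s.io/client-go/tools/cache"
)

// syncAll waits for every informer to sync within the given timeout,
// mirroring the shape of SyncInformers: one goroutine per informer, and the
// first informer that fails to sync produces the returned error.
func syncAll(informers map[string]cache.SharedInformer, timeout time.Duration) error {
	var g errgroup.Group
	for name, informer := range informers {
		name, informer := name, informer // capture loop variables for the goroutine
		g.Go(func() error {
			stopCh := make(chan struct{})
			time.AfterFunc(timeout, func() { close(stopCh) })
			if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
				return fmt.Errorf("informer %s did not sync within %s", name, timeout)
			}
			return nil
		})
	}
	return g.Wait()
}
```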
syncTimeout := timeoutConfig + extraWait diff --git a/pkg/util/kubernetes/autoscalers/datadogexternal.go b/pkg/util/kubernetes/autoscalers/datadogexternal.go index 7903dbd75a84e..11e44050650ed 100644 --- a/pkg/util/kubernetes/autoscalers/datadogexternal.go +++ b/pkg/util/kubernetes/autoscalers/datadogexternal.go @@ -19,7 +19,7 @@ import ( "gopkg.in/zorkian/go-datadog-api.v2" utilserror "k8s.io/apimachinery/pkg/util/errors" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/telemetry" le "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -76,7 +76,7 @@ var ( func getMinRemainingRequestsTracker() *minTracker { once.Do(func() { - refreshPeriod := config.Datadog().GetInt("external_metrics_provider.refresh_period") + refreshPeriod := pkgconfigsetup.Datadog().GetInt("external_metrics_provider.refresh_period") expiryDuration := 2 * refreshPeriod minRemainingRequestsTracker = newMinTracker(time.Duration(time.Duration(expiryDuration) * time.Second)) }) @@ -92,6 +92,14 @@ func isRateLimitError(err error) bool { return strings.Contains(err.Error(), "429 Too Many Requests") } +// isUnprocessableEntityError is a helper function that checks if the received error is an unprocessable entity error +func isUnprocessableEntityError(err error) bool { + if err == nil { + return false + } + return strings.Contains(err.Error(), "422 Unprocessable Entity") +} + // queryDatadogExternal converts the metric name and labels from the Ref format into a Datadog metric. // It returns the last value for a bucket of 5 minutes, func (p *Processor) queryDatadogExternal(ddQueries []string, timeWindow time.Duration) (map[string]Point, error) { @@ -108,6 +116,8 @@ func (p *Processor) queryDatadogExternal(ddQueries []string, timeWindow time.Dur if err != nil { if isRateLimitError(err) { ddRequests.Inc("rate_limit_error", le.JoinLeaderValue) + } else if isUnprocessableEntityError(err) { + ddRequests.Inc("unprocessable_entity_error", le.JoinLeaderValue) } else { ddRequests.Inc("error", le.JoinLeaderValue) } diff --git a/pkg/util/kubernetes/autoscalers/datadogexternal_test.go b/pkg/util/kubernetes/autoscalers/datadogexternal_test.go index 1f72345f02d08..b806cfee4487a 100644 --- a/pkg/util/kubernetes/autoscalers/datadogexternal_test.go +++ b/pkg/util/kubernetes/autoscalers/datadogexternal_test.go @@ -17,7 +17,7 @@ import ( "gopkg.in/zorkian/go-datadog-api.v2" datadogclientmock "github.com/DataDog/datadog-agent/comp/autoscaling/datadogclient/mock" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/pointer" ) @@ -251,7 +251,7 @@ func TestDatadogExternalQuery(t *testing.T) { datadogClientComp := datadogclientmock.New(t).Comp datadogClientComp.SetQueryMetricsFunc(test.queryfunc) p := Processor{datadogClient: datadogClientComp} - points, err := p.queryDatadogExternal(test.metricName, time.Duration(config.Datadog().GetInt64("external_metrics_provider.bucket_size"))*time.Second) + points, err := p.queryDatadogExternal(test.metricName, time.Duration(pkgconfigsetup.Datadog().GetInt64("external_metrics_provider.bucket_size"))*time.Second) if test.err != nil { require.EqualError(t, test.err, err.Error()) } @@ -303,3 +303,39 @@ func TestIsRateLimitError(t *testing.T) { }) } } + +func TestIsUnprocessableEntityError(t *testing.T) { + + tests := 
[]struct { + name string + err error + isUnprocessableEntity bool + }{ + { + name: "nil error", + err: nil, + isUnprocessableEntity: false, + }, + { + name: "empty error", + err: errors.New(""), + isUnprocessableEntity: false, + }, + { + name: "unprocessable entity error", + err: errors.New("422 Unprocessable Entity"), + isUnprocessableEntity: true, + }, + { + name: "unprocessable entity error variant", + err: errors.New("API error 422 Unprocessable Entity: "), + isUnprocessableEntity: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require.Equal(t, isUnprocessableEntityError(test.err), test.isUnprocessableEntity) + }) + } +} diff --git a/pkg/util/kubernetes/autoscalers/processor.go b/pkg/util/kubernetes/autoscalers/processor.go index 3440c6ba01e2d..3ecb74adf1864 100644 --- a/pkg/util/kubernetes/autoscalers/processor.go +++ b/pkg/util/kubernetes/autoscalers/processor.go @@ -23,7 +23,7 @@ import ( datadogclientcomp "github.com/DataDog/datadog-agent/comp/autoscaling/datadogclient/def" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/custommetrics" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -55,7 +55,7 @@ type queryResponse struct { // NewProcessor returns a new Processor func NewProcessor(datadogCl datadogclientcomp.Component) *Processor { - externalMaxAge := math.Max(config.Datadog().GetFloat64("external_metrics_provider.max_age"), 3*config.Datadog().GetFloat64("external_metrics_provider.rollup")) + externalMaxAge := math.Max(pkgconfigsetup.Datadog().GetFloat64("external_metrics_provider.max_age"), 3*pkgconfigsetup.Datadog().GetFloat64("external_metrics_provider.rollup")) return &Processor{ externalMaxAge: time.Duration(externalMaxAge) * time.Second, datadogClient: datadogCl, @@ -108,24 +108,24 @@ func (p *Processor) ProcessWPAs(wpa *v1alpha1.WatermarkPodAutoscaler) map[string // GetDefaultMaxAge returns the configured default max age. func GetDefaultMaxAge() time.Duration { - return time.Duration(config.Datadog().GetInt64("external_metrics_provider.max_age")) * time.Second + return time.Duration(pkgconfigsetup.Datadog().GetInt64("external_metrics_provider.max_age")) * time.Second } // GetDefaultTimeWindow returns the configured default time window func GetDefaultTimeWindow() time.Duration { - return time.Duration(config.Datadog().GetInt64("external_metrics_provider.bucket_size")) * time.Second + return time.Duration(pkgconfigsetup.Datadog().GetInt64("external_metrics_provider.bucket_size")) * time.Second } // GetDefaultMaxTimeWindow returns the configured max time window func GetDefaultMaxTimeWindow() time.Duration { - return time.Duration(config.Datadog().GetInt64("external_metrics_provider.max_time_window")) * time.Second + return time.Duration(pkgconfigsetup.Datadog().GetInt64("external_metrics_provider.max_time_window")) * time.Second } // UpdateExternalMetrics does the validation and processing of the ExternalMetrics // TODO if a metric's ts in emList is too recent, no need to add it to the batchUpdate. 
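NewProcessor (hunk above) derives the effective max age for external metrics as the larger of `external_metrics_provider.max_age` and three `rollup` windows. A tiny standalone version of that arithmetic, with the two settings passed as parameters instead of read from configuration:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// externalMaxAge mirrors the computation in NewProcessor: take the configured
// max age, but never less than three rollup intervals, expressed in seconds.
func externalMaxAge(maxAgeSeconds, rollupSeconds float64) time.Duration {
	return time.Duration(math.Max(maxAgeSeconds, 3*rollupSeconds)) * time.Second
}

func main() {
	fmt.Println(externalMaxAge(120, 30)) // 2m0s  (max_age wins)
	fmt.Println(externalMaxAge(60, 30))  // 1m30s (3 x rollup wins)
}
```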
func (p *Processor) UpdateExternalMetrics(emList map[string]custommetrics.ExternalMetricValue) (updated map[string]custommetrics.ExternalMetricValue) { - aggregator := config.Datadog().GetString("external_metrics.aggregator") - rollup := config.Datadog().GetInt("external_metrics_provider.rollup") + aggregator := pkgconfigsetup.Datadog().GetString("external_metrics.aggregator") + rollup := pkgconfigsetup.Datadog().GetInt("external_metrics_provider.rollup") maxAge := int64(p.externalMaxAge.Seconds()) var err error updated = make(map[string]custommetrics.ExternalMetricValue) @@ -221,7 +221,7 @@ func isURLBeyondLimits(uriLength, numBuckets int) (bool, error) { return true, fmt.Errorf("Query is too long, could yield a server side error. Dropping") } - chunkSize := config.Datadog().GetInt("external_metrics_provider.chunk_size") + chunkSize := pkgconfigsetup.Datadog().GetInt("external_metrics_provider.chunk_size") return uriLength >= maxCharactersPerChunk || numBuckets >= chunkSize, nil } diff --git a/pkg/util/kubernetes/clustername/clustername.go b/pkg/util/kubernetes/clustername/clustername.go index d6b283aff3d7a..094dbf9542b84 100644 --- a/pkg/util/kubernetes/clustername/clustername.go +++ b/pkg/util/kubernetes/clustername/clustername.go @@ -14,8 +14,9 @@ import ( "strings" "sync" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/config/setup/constants" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/azure" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/gce" @@ -74,7 +75,7 @@ func getClusterName(ctx context.Context, data *clusterNameData, hostname string) } if !data.initDone { - data.clusterName = config.Datadog().GetString("cluster_name") + data.clusterName = pkgconfigsetup.Datadog().GetString("cluster_name") if data.clusterName != "" { log.Infof("Got cluster name %s from config", data.clusterName) // the host alias "hostname-clustername" must not exceed 255 chars @@ -154,7 +155,7 @@ func GetClusterName(ctx context.Context, hostname string) string { // "enabled_rfc1123_compliant_cluster_name_tag" is set to "true" // this allow to limit the risk of breaking user that currently rely on previous `kube_cluster_name` tag value. func GetClusterNameTagValue(ctx context.Context, hostname string) string { - if config.Datadog().GetBool("enabled_rfc1123_compliant_cluster_name_tag") { + if pkgconfigsetup.Datadog().GetBool("enabled_rfc1123_compliant_cluster_name_tag") { return GetRFC1123CompliantClusterName(ctx, hostname) } return GetClusterName(ctx, hostname) @@ -187,7 +188,7 @@ func ResetClusterName() { // This variable should come from a configmap, created by the cluster-agent. 
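isURLBeyondLimits (hunk above) decides when a batch of metric queries must be split: either the encoded URI has grown too long, or the batch already holds `external_metrics_provider.chunk_size` buckets. A simplified standalone check follows; `maxCharactersPerChunk` is a placeholder constant rather than the agent's actual limit, and the config read is replaced by a parameter.

```go
package main

import "fmt"

// maxCharactersPerChunk is a placeholder; the real constant lives in
// processor.go and its value is not shown in this diff.
const maxCharactersPerChunk = 7000

// shouldStartNewChunk reports whether the current batch must be flushed
// before adding another query, using the same two limits as
// isURLBeyondLimits: URI length and bucket count.
func shouldStartNewChunk(uriLength, numBuckets, chunkSize int) bool {
	return uriLength >= maxCharactersPerChunk || numBuckets >= chunkSize
}

func main() {
	fmt.Println(shouldStartNewChunk(1200, 35, 35)) // true: bucket count reached
	fmt.Println(shouldStartNewChunk(1200, 10, 35)) // false: room left in the chunk
}
```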
// This function is meant for the node-agent to call (cluster-agent should call GetOrCreateClusterID) func GetClusterID() (string, error) { - cacheClusterIDKey := cache.BuildAgentKey(config.ClusterIDCacheKey) + cacheClusterIDKey := cache.BuildAgentKey(constants.ClusterIDCacheKey) if cachedClusterID, found := cache.Cache.Get(cacheClusterIDKey); found { return cachedClusterID.(string), nil } diff --git a/pkg/util/kubernetes/clustername/clustername_test.go b/pkg/util/kubernetes/clustername/clustername_test.go index f9c0ca366f8c9..674ab904ae23f 100644 --- a/pkg/util/kubernetes/clustername/clustername_test.go +++ b/pkg/util/kubernetes/clustername/clustername_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" ) @@ -19,7 +18,7 @@ import ( func TestGetClusterName(t *testing.T) { ctx := context.Background() mockConfig := configmock.New(t) - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) data := newClusterNameData() testClusterName := "laika" diff --git a/pkg/util/kubernetes/hostinfo/cluster_name_from_node_label.go b/pkg/util/kubernetes/hostinfo/cluster_name_from_node_label.go index 73558be8c801b..7e2470d590f11 100644 --- a/pkg/util/kubernetes/hostinfo/cluster_name_from_node_label.go +++ b/pkg/util/kubernetes/hostinfo/cluster_name_from_node_label.go @@ -8,7 +8,7 @@ package hostinfo import ( "context" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) const ( @@ -41,7 +41,7 @@ func (n *NodeInfo) GetNodeClusterNameLabel(ctx context.Context, clusterName stri var clusterNameLabelKeys []clusterNameLabelType // check if a node label has been added on the config - if customLabels := config.Datadog().GetString("kubernetes_node_label_as_cluster_name"); customLabels != "" { + if customLabels := pkgconfigsetup.Datadog().GetString("kubernetes_node_label_as_cluster_name"); customLabels != "" { clusterNameLabelKeys = append(clusterNameLabelKeys, clusterNameLabelType{key: customLabels, shouldOverride: true}) } else { // Use default configuration diff --git a/pkg/util/kubernetes/hostinfo/no_tags.go b/pkg/util/kubernetes/hostinfo/no_tags.go index f81cd46cbd673..a7648df88c274 100644 --- a/pkg/util/kubernetes/hostinfo/no_tags.go +++ b/pkg/util/kubernetes/hostinfo/no_tags.go @@ -10,14 +10,14 @@ package hostinfo import ( "context" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) // KubeNodeTagsProvider allows computing node tags based on the user configurations for node labels and annotations as tags type KubeNodeTagsProvider struct{} // NewKubeNodeTagsProvider creates and returns a new kube node tags provider object -func NewKubeNodeTagsProvider(_ config.Reader) KubeNodeTagsProvider { +func NewKubeNodeTagsProvider(_ model.Reader) KubeNodeTagsProvider { return KubeNodeTagsProvider{} } diff --git a/pkg/util/kubernetes/hostinfo/node_annotations.go b/pkg/util/kubernetes/hostinfo/node_annotations.go index 04188fea28640..b65f217edf048 100644 --- a/pkg/util/kubernetes/hostinfo/node_annotations.go +++ b/pkg/util/kubernetes/hostinfo/node_annotations.go @@ -10,7 +10,7 @@ package hostinfo import ( "context" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/clusteragent" 
"github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" ) @@ -27,7 +27,7 @@ func GetNodeAnnotations(ctx context.Context) (map[string]string, error) { return nil, err } - if config.Datadog().GetBool("cluster_agent.enabled") { + if pkgconfigsetup.Datadog().GetBool("cluster_agent.enabled") { cl, err := clusteragent.GetClusterAgentClient() if err != nil { return nil, err diff --git a/pkg/util/kubernetes/hostinfo/node_labels.go b/pkg/util/kubernetes/hostinfo/node_labels.go index 23f0ce819d435..52afb1a15e64c 100644 --- a/pkg/util/kubernetes/hostinfo/node_labels.go +++ b/pkg/util/kubernetes/hostinfo/node_labels.go @@ -10,7 +10,7 @@ package hostinfo import ( "context" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/clusteragent" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" ) @@ -49,7 +49,7 @@ func (n *NodeInfo) GetNodeLabels(ctx context.Context) (map[string]string, error) return nil, err } - if config.Datadog().GetBool("cluster_agent.enabled") { + if pkgconfigsetup.Datadog().GetBool("cluster_agent.enabled") { cl, err := n.getClusterAgentFunc() if err != nil { return nil, err diff --git a/pkg/util/kubernetes/hostinfo/tags.go b/pkg/util/kubernetes/hostinfo/tags.go index 6fc7e4314b722..fa2a8e47d1c89 100644 --- a/pkg/util/kubernetes/hostinfo/tags.go +++ b/pkg/util/kubernetes/hostinfo/tags.go @@ -13,7 +13,7 @@ import ( k8smetadata "github.com/DataDog/datadog-agent/comp/core/tagger/k8s_metadata" "github.com/DataDog/datadog-agent/comp/core/tagger/taglist" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" configutils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -25,7 +25,7 @@ type KubeNodeTagsProvider struct { } // NewKubeNodeTagsProvider creates and returns a new kube node tags provider object -func NewKubeNodeTagsProvider(conf config.Reader) KubeNodeTagsProvider { +func NewKubeNodeTagsProvider(conf model.Reader) KubeNodeTagsProvider { return KubeNodeTagsProvider{configutils.GetMetadataAsTags(conf)} } diff --git a/pkg/util/kubernetes/kubelet/json.go b/pkg/util/kubernetes/kubelet/json.go index 3a3b103be3501..caba0719737f3 100644 --- a/pkg/util/kubernetes/kubelet/json.go +++ b/pkg/util/kubernetes/kubelet/json.go @@ -13,7 +13,7 @@ import ( jsoniter "github.com/json-iterator/go" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // jsoniterConfig mirrors jsoniter.ConfigFastest @@ -34,7 +34,7 @@ type podUnmarshaller struct { func newPodUnmarshaller() *podUnmarshaller { pu := &podUnmarshaller{ - podExpirationDuration: config.Datadog().GetDuration("kubernetes_pod_expiration_duration") * time.Second, + podExpirationDuration: pkgconfigsetup.Datadog().GetDuration("kubernetes_pod_expiration_duration") * time.Second, timeNowFunction: time.Now, } diff --git a/pkg/util/kubernetes/kubelet/kubelet.go b/pkg/util/kubernetes/kubelet/kubelet.go index 62fe74f9183af..b1622e27bf2ea 100644 --- a/pkg/util/kubernetes/kubelet/kubelet.go +++ b/pkg/util/kubernetes/kubelet/kubelet.go @@ -15,7 +15,7 @@ import ( "sync" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/cache" 
"github.com/DataDog/datadog-agent/pkg/util/containers" @@ -89,11 +89,11 @@ func (ku *KubeUtil) init() error { func NewKubeUtil() *KubeUtil { ku := &KubeUtil{ rawConnectionInfo: make(map[string]string), - podListCacheDuration: config.Datadog().GetDuration("kubelet_cache_pods_duration") * time.Second, + podListCacheDuration: pkgconfigsetup.Datadog().GetDuration("kubelet_cache_pods_duration") * time.Second, podUnmarshaller: newPodUnmarshaller(), } - waitOnMissingContainer := config.Datadog().GetDuration("kubelet_wait_on_missing_container") + waitOnMissingContainer := pkgconfigsetup.Datadog().GetDuration("kubelet_wait_on_missing_container") if waitOnMissingContainer > 0 { ku.waitOnMissingContainer = waitOnMissingContainer * time.Second } diff --git a/pkg/util/kubernetes/kubelet/kubelet_client.go b/pkg/util/kubernetes/kubelet/kubelet_client.go index fa3d3c3cda249..32f3745fe79be 100644 --- a/pkg/util/kubernetes/kubelet/kubelet_client.go +++ b/pkg/util/kubernetes/kubelet/kubelet_client.go @@ -21,7 +21,7 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/filesystem" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -164,16 +164,16 @@ func getKubeletClient(ctx context.Context) (*kubeletClient, error) { var err error kubeletTimeout := 30 * time.Second - kubeletProxyEnabled := config.Datadog().GetBool("eks_fargate") - kubeletHost := config.Datadog().GetString("kubernetes_kubelet_host") - kubeletHTTPSPort := config.Datadog().GetInt("kubernetes_https_kubelet_port") - kubeletHTTPPort := config.Datadog().GetInt("kubernetes_http_kubelet_port") - kubeletTLSVerify := config.Datadog().GetBool("kubelet_tls_verify") - kubeletCAPath := config.Datadog().GetString("kubelet_client_ca") - kubeletTokenPath := config.Datadog().GetString("kubelet_auth_token_path") - kubeletClientCertPath := config.Datadog().GetString("kubelet_client_crt") - kubeletClientKeyPath := config.Datadog().GetString("kubelet_client_key") - kubeletNodeName := config.Datadog().Get("kubernetes_kubelet_nodename") + kubeletProxyEnabled := pkgconfigsetup.Datadog().GetBool("eks_fargate") + kubeletHost := pkgconfigsetup.Datadog().GetString("kubernetes_kubelet_host") + kubeletHTTPSPort := pkgconfigsetup.Datadog().GetInt("kubernetes_https_kubelet_port") + kubeletHTTPPort := pkgconfigsetup.Datadog().GetInt("kubernetes_http_kubelet_port") + kubeletTLSVerify := pkgconfigsetup.Datadog().GetBool("kubelet_tls_verify") + kubeletCAPath := pkgconfigsetup.Datadog().GetString("kubelet_client_ca") + kubeletTokenPath := pkgconfigsetup.Datadog().GetString("kubelet_auth_token_path") + kubeletClientCertPath := pkgconfigsetup.Datadog().GetString("kubelet_client_crt") + kubeletClientKeyPath := pkgconfigsetup.Datadog().GetString("kubelet_client_key") + kubeletNodeName := pkgconfigsetup.Datadog().Get("kubernetes_kubelet_nodename") var kubeletPathPrefix string var kubeletToken string @@ -209,7 +209,7 @@ func getKubeletClient(ctx context.Context) (*kubeletClient, error) { } kubeletHTTPSPort = int(httpsPort) - if config.Datadog().Get("kubernetes_kubelet_nodename") != "" { + if pkgconfigsetup.Datadog().Get("kubernetes_kubelet_nodename") != "" { kubeletPathPrefix = fmt.Sprintf("/api/v1/nodes/%s/proxy", kubeletNodeName) apiServerIP := os.Getenv("KUBERNETES_SERVICE_HOST") diff --git a/pkg/util/kubernetes/kubelet/kubelet_orchestrator_test.go 
b/pkg/util/kubernetes/kubelet/kubelet_orchestrator_test.go index 88e49edb841c1..83d84de59cda6 100644 --- a/pkg/util/kubernetes/kubelet/kubelet_orchestrator_test.go +++ b/pkg/util/kubernetes/kubelet/kubelet_orchestrator_test.go @@ -15,8 +15,9 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) type KubeletOrchestratorTestSuite struct { @@ -85,14 +86,15 @@ func (suite *KubeletOrchestratorTestSuite) TestGetRawLocalPodList() { } func TestKubeletOrchestratorTestSuite(t *testing.T) { - config.SetupLogger( - config.LoggerName("test"), + pkglogsetup.SetupLogger( + pkglogsetup.LoggerName("test"), "trace", "", "", false, true, false, + pkgconfigsetup.Datadog(), ) suite.Run(t, new(KubeletOrchestratorTestSuite)) } diff --git a/pkg/util/kubernetes/kubelet/kubelet_test.go b/pkg/util/kubernetes/kubelet/kubelet_test.go index eacb28eaaff47..5ced2ecf98817 100644 --- a/pkg/util/kubernetes/kubelet/kubelet_test.go +++ b/pkg/util/kubernetes/kubelet/kubelet_test.go @@ -28,10 +28,11 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/log" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) const ( @@ -829,14 +830,15 @@ func (suite *KubeletTestSuite) TestPodListExpire() { } func TestKubeletTestSuite(t *testing.T) { - config.SetupLogger( - config.LoggerName("test"), + pkglogsetup.SetupLogger( + pkglogsetup.LoggerName("test"), "trace", "", "", false, true, false, + pkgconfigsetup.Datadog(), ) suite.Run(t, new(KubeletTestSuite)) } diff --git a/pkg/util/log/log_podman_util.go b/pkg/util/log/log_podman_util.go new file mode 100644 index 0000000000000..4327572820cff --- /dev/null +++ b/pkg/util/log/log_podman_util.go @@ -0,0 +1,27 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package log + +import ( + "strings" +) + +// The paths below are set in podman code and cannot be modified by the user. +// Ref: https://github.com/containers/podman/blob/7c38ee756592d95e718967fcd3983b81abd95e76/test/e2e/run_transient_test.go#L19-L45 +const ( + sqlDBSuffix string = "/storage/db.sql" + boltDBSuffix string = "/storage/libpod/bolt_state.db" +) + +// ExtractPodmanRootDirFromDBPath extracts the podman base path for the containers directory based on the user-provided `podman_db_path`. 
+func ExtractPodmanRootDirFromDBPath(podmanDBPath string) string { + if strings.HasSuffix(podmanDBPath, sqlDBSuffix) { + return strings.TrimSuffix(podmanDBPath, sqlDBSuffix) + } else if strings.HasSuffix(podmanDBPath, boltDBSuffix) { + return strings.TrimSuffix(podmanDBPath, boltDBSuffix) + } + return "" +} diff --git a/pkg/util/log/log_podman_util_test.go b/pkg/util/log/log_podman_util_test.go new file mode 100644 index 0000000000000..13e9a601b2d0d --- /dev/null +++ b/pkg/util/log/log_podman_util_test.go @@ -0,0 +1,33 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package log + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_ExtractPodmanRootDirFromDBPath(t *testing.T) { + testCases := []struct { + name string + input string + expected string + }{ + {"Rootless & BoltDB", "/data/containers_tomcat/storage/libpod/bolt_state.db", "/data/containers_tomcat"}, + {"Rootfull & BoltDB", "/var/lib/containers/storage/libpod/bolt_state.db", "/var/lib/containers"}, + {"Rootless & SQLite", "/home/ubuntu/.local/share/containers/storage/db.sql", "/home/ubuntu/.local/share/containers"}, + {"Rootfull & SQLite", "/var/lib/containers/storage/db.sql", "/var/lib/containers"}, + {"No matching suffix", "/foo/bar/baz", ""}, + } + + for _, testCase := range testCases { + output := ExtractPodmanRootDirFromDBPath(testCase.input) + assert.Equal(t, testCase.expected, output, fmt.Sprintf("%s: Expected %s but output is %s for input %s", testCase.name, testCase.expected, output, testCase.input)) + } + +} diff --git a/pkg/util/pdhutil/pdhcounter.go b/pkg/util/pdhutil/pdhcounter.go index 95ddc1b6fbb0b..a92e39fd21394 100644 --- a/pkg/util/pdhutil/pdhcounter.go +++ b/pkg/util/pdhutil/pdhcounter.go @@ -9,7 +9,7 @@ package pdhutil import ( "fmt" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "golang.org/x/sys/windows" @@ -118,7 +118,7 @@ func (counter *pdhCounter) ShouldInit() bool { // already initialized return false } - var initFailLimit = config.Datadog().GetInt("windows_counter_init_failure_limit") + var initFailLimit = pkgconfigsetup.Datadog().GetInt("windows_counter_init_failure_limit") if initFailLimit > 0 && counter.initFailCount >= initFailLimit { counter.initError = fmt.Errorf("counter exceeded the maximum number of failed initialization attempts. This error indicates that the Windows performance counter database may need to be rebuilt") // attempts exceeded @@ -134,7 +134,7 @@ func (counter *pdhCounter) SetInitError(err error) error { } counter.initFailCount++ - var initFailLimit = config.Datadog().GetInt("windows_counter_init_failure_limit") + var initFailLimit = pkgconfigsetup.Datadog().GetInt("windows_counter_init_failure_limit") if initFailLimit > 0 && counter.initFailCount >= initFailLimit { err = fmt.Errorf("%v. 
Counter exceeded the maximum number of failed initialization attempts", err) } else if initFailLimit > 0 { diff --git a/pkg/util/pdhutil/pdhhelper.go b/pkg/util/pdhutil/pdhhelper.go index 48a43f4487b58..370c320f6e818 100644 --- a/pkg/util/pdhutil/pdhhelper.go +++ b/pkg/util/pdhutil/pdhhelper.go @@ -18,7 +18,7 @@ import ( "go.uber.org/atomic" "golang.org/x/sys/windows" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -92,7 +92,7 @@ func refreshPdhObjectCache(forceRefresh bool) (didrefresh bool, err error) { var len uint32 //revive:enable:redefines-builtin-id - refreshInterval := config.Datadog().GetInt("windows_counter_refresh_interval") + refreshInterval := pkgconfigsetup.Datadog().GetInt("windows_counter_refresh_interval") if refreshInterval == 0 { // refresh disabled return false, nil diff --git a/pkg/util/podman/container.go b/pkg/util/podman/container.go index aa43b73251b80..d2180083a3d57 100644 --- a/pkg/util/podman/container.go +++ b/pkg/util/podman/container.go @@ -204,6 +204,10 @@ type ContainerRootFSConfig struct { // based on an image with this ID. // This conflicts with Rootfs. RootfsImageID string `json:"rootfsImageID,omitempty"` + // RootfsImageName is the (normalized) name of the image used to create + // the container. If the container was created from a Rootfs, this will + // be empty. + RootfsImageName string `json:"rootfsImageName,omitempty"` } // ContainerSecurityConfig is an embedded sub-config providing security configuration diff --git a/pkg/util/static_tags.go b/pkg/util/static_tags.go index 5239f3371ab6f..f0591d0243f9d 100644 --- a/pkg/util/static_tags.go +++ b/pkg/util/static_tags.go @@ -9,8 +9,8 @@ import ( "context" "strings" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/fargate" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" @@ -33,7 +33,7 @@ func GetStaticTagsSlice(ctx context.Context) []string { tags := []string{} // DD_TAGS / DD_EXTRA_TAGS - tags = append(tags, configUtils.GetConfiguredTags(config.Datadog(), false)...) + tags = append(tags, configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), false)...) 
// EKS Fargate specific tags if env.IsFeaturePresent(env.EKSFargate) { diff --git a/pkg/util/static_tags_test.go b/pkg/util/static_tags_test.go index 8ab0ab515f0aa..e9d9de3374ad8 100644 --- a/pkg/util/static_tags_test.go +++ b/pkg/util/static_tags_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" ) @@ -21,7 +20,7 @@ func TestStaticTags(t *testing.T) { mockConfig.SetWithoutSource("kubernetes_kubelet_nodename", "eksnode") defer mockConfig.SetWithoutSource("kubernetes_kubelet_nodename", "") - config.SetFeatures(t, env.EKSFargate) + env.SetFeatures(t, env.EKSFargate) t.Run("just tags", func(t *testing.T) { mockConfig.SetWithoutSource("tags", []string{"some:tag", "another:tag", "nocolon"}) @@ -63,7 +62,7 @@ func TestStaticTagsSlice(t *testing.T) { mockConfig.SetWithoutSource("kubernetes_kubelet_nodename", "eksnode") defer mockConfig.SetWithoutSource("kubernetes_kubelet_nodename", "") - config.SetFeatures(t, env.EKSFargate) + env.SetFeatures(t, env.EKSFargate) t.Run("just tags", func(t *testing.T) { mockConfig.SetWithoutSource("tags", []string{"some:tag", "another:tag", "nocolon"}) diff --git a/pkg/util/tagger/go.mod b/pkg/util/tagger/go.mod index 0d482a50ec691..63eca15ee4498 100644 --- a/pkg/util/tagger/go.mod +++ b/pkg/util/tagger/go.mod @@ -12,7 +12,9 @@ replace ( github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../collector/check/defaults github.com/DataDog/datadog-agent/pkg/config/env => ../../config/env github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/util/executable => ../../util/executable github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../util/filesystem github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../util/fxutil @@ -34,6 +36,8 @@ require ( github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.2 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.56.2 // indirect github.com/DataDog/datadog-agent/pkg/config/model v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.56.2 // indirect github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.2 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.2 // indirect @@ -60,7 +64,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/shirou/gopsutil/v3 v3.23.12 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -68,11 +72,11 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod 
v0.20.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/pkg/util/tagger/go.sum b/pkg/util/tagger/go.sum index 77ba213060c82..765bdc23a7bf4 100644 --- a/pkg/util/tagger/go.sum +++ b/pkg/util/tagger/go.sum @@ -180,8 +180,9 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -251,15 +252,15 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -295,8 +296,8 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -312,8 +313,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/pkg/util/tagger/tagger.go b/pkg/util/tagger/tagger.go index 018c9052b8063..b8ae45a2f97c5 100644 --- a/pkg/util/tagger/tagger.go +++ b/pkg/util/tagger/tagger.go @@ -6,11 +6,21 @@ // Package tagger provides function to check if the tagger should use composite entity id and object store package tagger -import pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" +import ( + "sync" + + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" +) + +var useCompositeStore bool +var doOnce sync.Once // ShouldUseCompositeStore indicates whether the tagger should use the default or composite implementation // of entity ID and object store. 
// TODO: remove this when we switch over fully to the composite implementation func ShouldUseCompositeStore() bool { - return pkgconfigsetup.Datadog().GetBool("tagger.tagstore_use_composite_entity_id") + doOnce.Do(func() { + useCompositeStore = pkgconfigsetup.Datadog().GetBool("tagger.tagstore_use_composite_entity_id") + }) + return useCompositeStore } diff --git a/pkg/util/trivy/trivy.go b/pkg/util/trivy/trivy.go index c37803c49d78d..385068d0518aa 100644 --- a/pkg/util/trivy/trivy.go +++ b/pkg/util/trivy/trivy.go @@ -298,7 +298,7 @@ func (c *Collector) ScanDockerImage(ctx context.Context, imgMeta *workloadmeta.C func (c *Collector) scanOverlayFS(ctx context.Context, layers []string, imgMeta *workloadmeta.ContainerImageMetadata, scanOptions sbom.ScanOptions) (sbom.Report, error) { overlayFsReader := NewFS(layers) - report, err := c.scanFilesystem(ctx, overlayFsReader, ".", imgMeta, scanOptions) + report, err := c.scanFilesystem(ctx, overlayFsReader, "/", imgMeta, scanOptions) if err != nil { return nil, err } @@ -320,7 +320,7 @@ func (c *Collector) ScanContainerdImageFromSnapshotter(ctx context.Context, imgM } layers := extractLayersFromOverlayFSMounts(mounts) if len(layers) == 0 { - return nil, fmt.Errorf("unable to extract layers from overlayfs mounts for image %s", imgMeta.ID) + return nil, fmt.Errorf("unable to extract layers from overlayfs mounts %+v for image %s", mounts, imgMeta.ID) } ctx = namespaces.WithNamespace(ctx, imgMeta.Namespace) diff --git a/pkg/util/winutil/iphelper/routes.go b/pkg/util/winutil/iphelper/routes.go index 2de67ec872cae..08ca4f956dfe0 100644 --- a/pkg/util/winutil/iphelper/routes.go +++ b/pkg/util/winutil/iphelper/routes.go @@ -198,20 +198,3 @@ func GetIFTable() (table map[uint32]windows.MibIfRow, err error) { return table, nil } - -// Ntohs converts a network byte order 16 bit int to host byte order -func Ntohs(i uint16) uint16 { - return binary.BigEndian.Uint16((*(*[2]byte)(unsafe.Pointer(&i)))[:]) -} - -// Ntohl converts a network byte order 32 bit int to host byte order -func Ntohl(i uint32) uint32 { - return binary.BigEndian.Uint32((*(*[4]byte)(unsafe.Pointer(&i)))[:]) -} - -// Htonl converts a host byte order 32 bit int to network byte order -func Htonl(i uint32) uint32 { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, i) - return *(*uint32)(unsafe.Pointer(&b[0])) -} diff --git a/pkg/windowsdriver/include/procmonapi.h b/pkg/windowsdriver/include/procmonapi.h index e778b6f579d9c..47d876eddb9a0 100644 --- a/pkg/windowsdriver/include/procmonapi.h +++ b/pkg/windowsdriver/include/procmonapi.h @@ -16,7 +16,7 @@ typedef __int64 LONG64; typedef unsigned char uint8_t; // define a version signature so that the driver won't load out of date structures, etc. 
-#define DD_PROCMONDRIVER_VERSION 0x04 +#define DD_PROCMONDRIVER_VERSION 0x05 #define DD_PROCMONDRIVER_SIGNATURE ((uint64_t)0xDD01 << 32 | DD_PROCMONDRIVER_VERSION) #define DD_PROCMONDRIVER_DEVICE_TYPE FILE_DEVICE_UNKNOWN // for more information on defining control codes, see @@ -73,4 +73,7 @@ typedef struct _dd_process_notification { // unfortunately, SIDS are variable length as well uint64_t SidLen; uint64_t SidOffset; + + uint64_t EnvBlockLen; + uint64_t EnvOffset; } DD_PROCESS_NOTIFICATION; \ No newline at end of file diff --git a/pkg/windowsdriver/procmon/procmon.go b/pkg/windowsdriver/procmon/procmon.go index 484b297b4518b..d6f4765e79501 100644 --- a/pkg/windowsdriver/procmon/procmon.go +++ b/pkg/windowsdriver/procmon/procmon.go @@ -26,6 +26,7 @@ type ProcessStartNotification struct { OwnerSidString string ImageFile string CmdLine string + EnvBlock []string // if this is nonzero, functions as notification to // the probe that the buffer size isn't large enough RequiredSize uint32 @@ -183,6 +184,7 @@ func decodeStruct(data []uint8, sz uint32) (start *ProcessStartNotification, sto var imagefile string var cmdline string var sidstring string + var envvars []string if n.ImageFileLen > 0 { imagefile = winutil.ConvertWindowsString(data[n.ImageFileOffset : n.ImageFileOffset+n.ImageFileLen]) @@ -194,6 +196,13 @@ func decodeStruct(data []uint8, sz uint32) (start *ProcessStartNotification, sto if n.SidLen > 0 { sidstring = winutil.ConvertWindowsString(data[n.SidOffset : n.SidOffset+n.SidLen]) } + + if n.EnvBlockLen > 0 { + envblockstart := (*uint16)(unsafe.Pointer(&data[n.EnvOffset])) + + envblock := unsafe.Slice(envblockstart, uint32(n.EnvBlockLen/2)) + envvars = winutil.ConvertWindowsStringList(envblock) + } start = &ProcessStartNotification{ Pid: n.ProcessId, PPid: n.ParentProcessId, @@ -202,6 +211,7 @@ func decodeStruct(data []uint8, sz uint32) (start *ProcessStartNotification, sto ImageFile: imagefile, CmdLine: cmdline, OwnerSidString: sidstring, + EnvBlock: envvars, } if n.SizeNeeded > n.Size { start.RequiredSize = uint32(n.SizeNeeded) diff --git a/pkg/windowsdriver/procmon/types_windows.go b/pkg/windowsdriver/procmon/types_windows.go index 6225ae3162cda..e76bcb89b7b8f 100644 --- a/pkg/windowsdriver/procmon/types_windows.go +++ b/pkg/windowsdriver/procmon/types_windows.go @@ -3,14 +3,14 @@ package procmon -const Signature = 0xdd0100000004 +const Signature = 0xdd0100000005 const ( ProcmonStartIOCTL = 0x222006 ProcmonStopIOCTL = 0x22200a ProcmonStatsIOCTL = 0x22200e - ProcmonSignature = 0xdd0100000004 + ProcmonSignature = 0xdd0100000005 ) const ( @@ -42,7 +42,9 @@ type DDProcessNotification struct { CommandLineOffset uint64 SidLen uint64 SidOffset uint64 + EnvBlockLen uint64 + EnvOffset uint64 } -const DDProcessNotificationSize = 0x68 +const DDProcessNotificationSize = 0x78 const DDProcmonStatsSize = 0x30 diff --git a/release.json b/release.json index 6d80d1d5fccc1..5561c338a8dc5 100644 --- a/release.json +++ b/release.json @@ -1,14 +1,14 @@ { "base_branch": "main", - "current_milestone": "7.58.0", + "current_milestone": "7.59.0", "last_stable": { "6": "6.53.0", - "7": "7.56.2" + "7": "7.57.1" }, "nightly": { "INTEGRATIONS_CORE_VERSION": "master", - "OMNIBUS_SOFTWARE_VERSION": "b479a9f6605bf3c28284829608fd6365d95c11f5", - "OMNIBUS_RUBY_VERSION": "f3fc847e03ba7081e266b2d333210ba129128a14", + "OMNIBUS_SOFTWARE_VERSION": "375618d70253293d71b13f9385260aa3dedd7125", + "OMNIBUS_RUBY_VERSION": "1c2069623d3595390392248a34efdf5e5411ab95", "JMXFETCH_VERSION": "0.49.3", "JMXFETCH_HASH": 
"258085a94d529a6bdf914db36dd50faf6fde2cebc44b1f54a60eb209a5d8917c", "MACOS_BUILD_VERSION": "master", @@ -17,8 +17,8 @@ "WINDOWS_DDNPM_SHASUM": "0f4665761324e1fef1c21651be5b70e79c72b5e7e5662d74619e7db2b27d5bc2", "SECURITY_AGENT_POLICIES_VERSION": "master", "WINDOWS_DDPROCMON_DRIVER": "release-signed", - "WINDOWS_DDPROCMON_VERSION": "1.0.4", - "WINDOWS_DDPROCMON_SHASUM": "3a23804adc7280390aabc01f0b709853755baa111f821f99627cd661ee917490", + "WINDOWS_DDPROCMON_VERSION": "1.1.0", + "WINDOWS_DDPROCMON_SHASUM": "a75da24f2385ff41246cad4f19e99642ca55199ea9e5b0c25dc5b9df4af035d7", "WINDOWS_APMINJECT_COMMENT": "The WINDOWS_APMINJECT entries below should NOT be added to the release targets", "WINDOWS_APMINJECT_MODULE": "release-signed", "WINDOWS_APMINJECT_VERSION": "1.1.3", @@ -26,8 +26,8 @@ }, "nightly-a7": { "INTEGRATIONS_CORE_VERSION": "master", - "OMNIBUS_SOFTWARE_VERSION": "b479a9f6605bf3c28284829608fd6365d95c11f5", - "OMNIBUS_RUBY_VERSION": "f3fc847e03ba7081e266b2d333210ba129128a14", + "OMNIBUS_SOFTWARE_VERSION": "375618d70253293d71b13f9385260aa3dedd7125", + "OMNIBUS_RUBY_VERSION": "1c2069623d3595390392248a34efdf5e5411ab95", "JMXFETCH_VERSION": "0.49.3", "JMXFETCH_HASH": "258085a94d529a6bdf914db36dd50faf6fde2cebc44b1f54a60eb209a5d8917c", "MACOS_BUILD_VERSION": "master", @@ -36,8 +36,8 @@ "WINDOWS_DDNPM_SHASUM": "0f4665761324e1fef1c21651be5b70e79c72b5e7e5662d74619e7db2b27d5bc2", "SECURITY_AGENT_POLICIES_VERSION": "master", "WINDOWS_DDPROCMON_DRIVER": "release-signed", - "WINDOWS_DDPROCMON_VERSION": "1.0.4", - "WINDOWS_DDPROCMON_SHASUM": "3a23804adc7280390aabc01f0b709853755baa111f821f99627cd661ee917490", + "WINDOWS_DDPROCMON_VERSION": "1.1.0", + "WINDOWS_DDPROCMON_SHASUM": "a75da24f2385ff41246cad4f19e99642ca55199ea9e5b0c25dc5b9df4af035d7", "WINDOWS_APMINJECT_COMMENT": "The WINDOWS_APMINJECT entries below should NOT be added to the release targets", "WINDOWS_APMINJECT_MODULE": "release-signed", "WINDOWS_APMINJECT_VERSION": "1.1.3", @@ -49,14 +49,14 @@ "OMNIBUS_RUBY_VERSION": "7.56.0-rc.1", "JMXFETCH_VERSION": "0.49.3", "JMXFETCH_HASH": "258085a94d529a6bdf914db36dd50faf6fde2cebc44b1f54a60eb209a5d8917c", - "SECURITY_AGENT_POLICIES_VERSION": "v0.57.0", + "SECURITY_AGENT_POLICIES_VERSION": "v0.58.0", "MACOS_BUILD_VERSION": "6.56.0-rc.3", "WINDOWS_DDNPM_DRIVER": "release-signed", - "WINDOWS_DDNPM_VERSION": "2.7.0", - "WINDOWS_DDNPM_SHASUM": "de6a2f437b906d1d0f3cfc9222c7f686b3d69726355c940476448a34535064c8", + "WINDOWS_DDNPM_VERSION": "2.7.1", + "WINDOWS_DDNPM_SHASUM": "0f4665761324e1fef1c21651be5b70e79c72b5e7e5662d74619e7db2b27d5bc2", "WINDOWS_DDPROCMON_DRIVER": "release-signed", - "WINDOWS_DDPROCMON_VERSION": "1.0.2", - "WINDOWS_DDPROCMON_SHASUM": "cf55e5163659dbbfac0c0cced6559a3042107da9e4df8140bea17067278061ab" + "WINDOWS_DDPROCMON_VERSION": "1.0.4", + "WINDOWS_DDPROCMON_SHASUM": "3a23804adc7280390aabc01f0b709853755baa111f821f99627cd661ee917490" }, "release-a7": { "INTEGRATIONS_CORE_VERSION": "7.56.0-rc.2", @@ -64,14 +64,14 @@ "OMNIBUS_RUBY_VERSION": "7.56.0-rc.1", "JMXFETCH_VERSION": "0.49.3", "JMXFETCH_HASH": "258085a94d529a6bdf914db36dd50faf6fde2cebc44b1f54a60eb209a5d8917c", - "SECURITY_AGENT_POLICIES_VERSION": "v0.57.0", + "SECURITY_AGENT_POLICIES_VERSION": "v0.58.0", "MACOS_BUILD_VERSION": "7.56.0-rc.3", "WINDOWS_DDNPM_DRIVER": "release-signed", - "WINDOWS_DDNPM_VERSION": "2.7.0", - "WINDOWS_DDNPM_SHASUM": "de6a2f437b906d1d0f3cfc9222c7f686b3d69726355c940476448a34535064c8", + "WINDOWS_DDNPM_VERSION": "2.7.1", + "WINDOWS_DDNPM_SHASUM": 
"0f4665761324e1fef1c21651be5b70e79c72b5e7e5662d74619e7db2b27d5bc2", "WINDOWS_DDPROCMON_DRIVER": "release-signed", - "WINDOWS_DDPROCMON_VERSION": "1.0.2", - "WINDOWS_DDPROCMON_SHASUM": "cf55e5163659dbbfac0c0cced6559a3042107da9e4df8140bea17067278061ab" + "WINDOWS_DDPROCMON_VERSION": "1.0.4", + "WINDOWS_DDPROCMON_SHASUM": "3a23804adc7280390aabc01f0b709853755baa111f821f99627cd661ee917490" }, "dca-1.17.0": { "SECURITY_AGENT_POLICIES_VERSION": "v0.18.6" diff --git a/releasenotes-dca/notes/admin-controller-vols-with-type-socket-dd57e8c0d3bb2c51.yaml b/releasenotes-dca/notes/admin-controller-vols-with-type-socket-dd57e8c0d3bb2c51.yaml new file mode 100644 index 0000000000000..807f941327e69 --- /dev/null +++ b/releasenotes-dca/notes/admin-controller-vols-with-type-socket-dd57e8c0d3bb2c51.yaml @@ -0,0 +1,17 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Added a new option for the Cluster Agent + ("admission_controller.inject_config.type_socket_volumes") to specify that + injected volumes should be of type "Socket". This option is disabled by + default. When set to true, injected pods will not start until the Agent + creates the DogstatsD and trace-agent sockets. This ensures no traces or + DogstatsD metrics are lost, but it can cause the pod to wait if the Agent + has issues creating the sockets. diff --git a/releasenotes/notes/Bump-embedded-Python-version-to-address-vulnerability-8c18ff42b95042f7.yaml b/releasenotes/notes/Bump-embedded-Python-version-to-address-vulnerability-8c18ff42b95042f7.yaml new file mode 100644 index 0000000000000..6594849bcf2c2 --- /dev/null +++ b/releasenotes/notes/Bump-embedded-Python-version-to-address-vulnerability-8c18ff42b95042f7.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +security: + - | + Bump embedded Python version to 3.12.6 to address `CVE-2024-4030` and `CVE-2024-4741`. diff --git a/releasenotes/notes/ad-label-support-logs-24ddb721e3f429fe.yaml b/releasenotes/notes/ad-label-support-logs-24ddb721e3f429fe.yaml new file mode 100644 index 0000000000000..1866763d95841 --- /dev/null +++ b/releasenotes/notes/ad-label-support-logs-24ddb721e3f429fe.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Adds missing support for the logs config key to work with AD annotations V2. 
+ diff --git a/releasenotes/notes/add-run-in-core-agent-to-template-e6c2c3134d2fb17d.yaml b/releasenotes/notes/add-run-in-core-agent-to-template-e6c2c3134d2fb17d.yaml new file mode 100644 index 0000000000000..204176b172025 --- /dev/null +++ b/releasenotes/notes/add-run-in-core-agent-to-template-e6c2c3134d2fb17d.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +features: + - | + Add ability to run process/container collection on the core Agent (Linux only). This is controlled + by the `process_config.run_in_core_agent.enabled` option in datadog.yaml. diff --git a/releasenotes/notes/addsbomtoconfig-99a7cd52fa412336.yaml b/releasenotes/notes/addsbomtoconfig-99a7cd52fa412336.yaml new file mode 100644 index 0000000000000..835cceda2fd29 --- /dev/null +++ b/releasenotes/notes/addsbomtoconfig-99a7cd52fa412336.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fixes the default configuration template to include the Cloud Security Management configuration options. diff --git a/releasenotes/notes/auto-instrumentation-fix-memory-request-8391bb2d06ac6990.yaml b/releasenotes/notes/auto-instrumentation-fix-memory-request-8391bb2d06ac6990.yaml new file mode 100644 index 0000000000000..4acb4ef044224 --- /dev/null +++ b/releasenotes/notes/auto-instrumentation-fix-memory-request-8391bb2d06ac6990.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fix OOM error with cluster agent auto instrumentation by increasing default memory request from 20Mi to 100Mi. diff --git a/releasenotes/notes/bump-go-sqllexer-0.0.15-18432a15bec8d683.yaml b/releasenotes/notes/bump-go-sqllexer-0.0.15-18432a15bec8d683.yaml new file mode 100644 index 0000000000000..aab369a281863 --- /dev/null +++ b/releasenotes/notes/bump-go-sqllexer-0.0.15-18432a15bec8d683.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + DBM: Add configuration options to SQL obfuscator to customize the obfuscation of SQL statements: + - ``KeepJSONPath`` - option to control whether JSON paths following JSON operators in SQL statements should be obfuscated. This option is only valid when ``ObfuscationMode`` is ``obfuscate_and_normalize``. diff --git a/releasenotes/notes/bump-go-to-1.22.7-1002302b0ef8c246.yaml b/releasenotes/notes/bump-go-to-1.22.7-1002302b0ef8c246.yaml new file mode 100644 index 0000000000000..54e50ffbf243f --- /dev/null +++ b/releasenotes/notes/bump-go-to-1.22.7-1002302b0ef8c246.yaml @@ -0,0 +1,4 @@ +--- +enhancements: +- | + Agents are now built with Go ``1.22.7``. 
diff --git a/releasenotes/notes/create-trace-context-from-step-function-invocation-7b9d1c3536f7135b.yaml b/releasenotes/notes/create-trace-context-from-step-function-invocation-7b9d1c3536f7135b.yaml new file mode 100644 index 0000000000000..a869025dcd272 --- /dev/null +++ b/releasenotes/notes/create-trace-context-from-step-function-invocation-7b9d1c3536f7135b.yaml @@ -0,0 +1,9 @@ +--- +enhancements: + - | + While using the AWS Lambda Extension, when a Lambda Function is invoked by + a [properly instrumented][1] Step Function, the Lambda Function will create + its Trace and Parent IDs deterministically based on the Step Function's + execution context. + [1]: https://docs.datadoghq.com/serverless/step_functions/installation/?tab=custom "Install Serverless Monitoring for AWS Step Functions" + diff --git a/releasenotes/notes/dotnet-lib-inject-v3-d5fb50cd7eccb116.yaml b/releasenotes/notes/dotnet-lib-inject-v3-d5fb50cd7eccb116.yaml new file mode 100644 index 0000000000000..947c33e62ab89 --- /dev/null +++ b/releasenotes/notes/dotnet-lib-inject-v3-d5fb50cd7eccb116.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Updates default .NET library used for auto-instrumentation from v2 to v3 \ No newline at end of file diff --git a/releasenotes/notes/enable-sqllexer-obfuscation-f0242108ff24efeb.yaml b/releasenotes/notes/enable-sqllexer-obfuscation-f0242108ff24efeb.yaml new file mode 100644 index 0000000000000..d8b2f6672b471 --- /dev/null +++ b/releasenotes/notes/enable-sqllexer-obfuscation-f0242108ff24efeb.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +features: + - | + APM: Add new 'sqllexer' feature flag for the Trace Agent, which enables + the sqllexer implementation of the SQL Obfuscator. diff --git a/releasenotes/notes/fix-container-check-memleak-d9ff028d5499197c.yaml b/releasenotes/notes/fix-container-check-memleak-d9ff028d5499197c.yaml new file mode 100644 index 0000000000000..1166a23c8be7e --- /dev/null +++ b/releasenotes/notes/fix-container-check-memleak-d9ff028d5499197c.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fixes memory leak caused by container check.
diff --git a/releasenotes/notes/fix-forwarder-health-check-09eeefbe1a4e20d1.yaml b/releasenotes/notes/fix-forwarder-health-check-09eeefbe1a4e20d1.yaml new file mode 100644 index 0000000000000..65bfcbb319166 --- /dev/null +++ b/releasenotes/notes/fix-forwarder-health-check-09eeefbe1a4e20d1.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fix the forwarder health check so that it reports unhealthy when the API key is invalid. diff --git a/releasenotes/notes/fix-opensuse-15rc6-sendpage-11ba41034deaa721.yaml b/releasenotes/notes/fix-opensuse-15rc6-sendpage-11ba41034deaa721.yaml new file mode 100644 index 0000000000000..7f7dcaba44f35 --- /dev/null +++ b/releasenotes/notes/fix-opensuse-15rc6-sendpage-11ba41034deaa721.yaml @@ -0,0 +1,5 @@ + +--- +fixes: + - | + Fixed issue with openSUSE 15 RC 6 where the eBPF tracer wouldn't start due to a failed validation of the ``tcp_sendpage`` probe. diff --git a/releasenotes/notes/fix-panic-on-readonly-filesystems-1012ba761aa70aaa.yaml b/releasenotes/notes/fix-panic-on-readonly-filesystems-1012ba761aa70aaa.yaml new file mode 100644 index 0000000000000..fda72f5fb9852 --- /dev/null +++ b/releasenotes/notes/fix-panic-on-readonly-filesystems-1012ba761aa70aaa.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fixes a panic caused by running the Agent on readonly filesystems. The + Agent will now return integration launchers and handle memory gracefully. diff --git a/releasenotes/notes/fix-scrubbing-arg-with-spaces-7d4372e6cf865856.yaml b/releasenotes/notes/fix-scrubbing-arg-with-spaces-7d4372e6cf865856.yaml new file mode 100644 index 0000000000000..08d9695a3ecdd --- /dev/null +++ b/releasenotes/notes/fix-scrubbing-arg-with-spaces-7d4372e6cf865856.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fix Process Agent argument scrubbing to allow scrubbing of quoted arguments. diff --git a/releasenotes/notes/fix-scrubbing-arg-with-spaces-orchestrator-e88651f29cf1120d.yaml b/releasenotes/notes/fix-scrubbing-arg-with-spaces-orchestrator-e88651f29cf1120d.yaml new file mode 100644 index 0000000000000..f85665875ff4e --- /dev/null +++ b/releasenotes/notes/fix-scrubbing-arg-with-spaces-orchestrator-e88651f29cf1120d.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fix Orchestrator argument scrubbing to allow scrubbing of quoted arguments. diff --git a/releasenotes/notes/install-selinux-system-probe-policy-on-Oracle-linux-68e1fa8eacfde513.yaml b/releasenotes/notes/install-selinux-system-probe-policy-on-Oracle-linux-68e1fa8eacfde513.yaml new file mode 100644 index 0000000000000..234ac725ca17b --- /dev/null +++ b/releasenotes/notes/install-selinux-system-probe-policy-on-Oracle-linux-68e1fa8eacfde513.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. 
So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + The system-probe selinux policy is now installed on Oracle Linux diff --git a/releasenotes/notes/network-path-change-timeout-configuration-4ccec24497bd1574.yaml b/releasenotes/notes/network-path-change-timeout-configuration-4ccec24497bd1574.yaml new file mode 100644 index 0000000000000..79170d87fd8cb --- /dev/null +++ b/releasenotes/notes/network-path-change-timeout-configuration-4ccec24497bd1574.yaml @@ -0,0 +1,14 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +upgrade: + - | + Changes behavior of the timeout for Network Path. Previously, the timeout + signified the total time to wait for a full traceroute to complete. Now, + the timeout signifies the time to wait for each hop in the traceroute. + Additionally, the default timeout has been changed to 1000ms. diff --git a/releasenotes/notes/network-path-latency-fix-575efe1aa26c250b.yaml b/releasenotes/notes/network-path-latency-fix-575efe1aa26c250b.yaml new file mode 100644 index 0000000000000..a710cbb370c15 --- /dev/null +++ b/releasenotes/notes/network-path-latency-fix-575efe1aa26c250b.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fixes an issue where TCP traceroute latency was not being calculated correctly. diff --git a/releasenotes/notes/network-path-update-defaults-a4b4eb903130e283.yaml b/releasenotes/notes/network-path-update-defaults-a4b4eb903130e283.yaml new file mode 100644 index 0000000000000..c1261d2c08792 --- /dev/null +++ b/releasenotes/notes/network-path-update-defaults-a4b4eb903130e283.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Increases the default input channel, processing channel, and context store sizes + for network traffic paths. diff --git a/releasenotes/notes/networkpath-use-default-udp-port-4145a4b3700e98f4.yaml b/releasenotes/notes/networkpath-use-default-udp-port-4145a4b3700e98f4.yaml new file mode 100644 index 0000000000000..fc143b588792e --- /dev/null +++ b/releasenotes/notes/networkpath-use-default-udp-port-4145a4b3700e98f4.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. 
So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + The default UDP port for traceroute (port 33434) is now used for Network Traffic based paths, instead of the port detected by NPM. diff --git a/releasenotes/notes/npm-udp-packet-counting-ebce8760bec29f50.yaml b/releasenotes/notes/npm-udp-packet-counting-ebce8760bec29f50.yaml new file mode 100644 index 0000000000000..19c9c532dfc58 --- /dev/null +++ b/releasenotes/notes/npm-udp-packet-counting-ebce8760bec29f50.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +features: + - | + NPM - adds UDP "Packets Sent" and "Packets Received" to the network telemetry in Linux. diff --git a/releasenotes/notes/oom-kill-cgroup-victim-2aa7ca4e8e3ffac2.yaml b/releasenotes/notes/oom-kill-cgroup-victim-2aa7ca4e8e3ffac2.yaml new file mode 100644 index 0000000000000..e5bf2a5ef0573 --- /dev/null +++ b/releasenotes/notes/oom-kill-cgroup-victim-2aa7ca4e8e3ffac2.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + OOM Kill Check now reports the cgroup name of the victim process rather than the triggering process. diff --git a/releasenotes/notes/openssl_3.3.2-f873f60a69cfe59c.yaml b/releasenotes/notes/openssl_3.3.2-f873f60a69cfe59c.yaml new file mode 100644 index 0000000000000..91e622eb1a257 --- /dev/null +++ b/releasenotes/notes/openssl_3.3.2-f873f60a69cfe59c.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +security: + - | + Update OpenSSL to 3.3.2 (on Linux & macOS) in order to mitigate CVE-2024-6119. diff --git a/releasenotes/notes/oracle-telemetry-bcc1ab08a2b92bc6.yaml b/releasenotes/notes/oracle-telemetry-bcc1ab08a2b92bc6.yaml new file mode 100644 index 0000000000000..194a7a58b5d88 --- /dev/null +++ b/releasenotes/notes/oracle-telemetry-bcc1ab08a2b92bc6.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +other: + - | + Adds Agent telemetry for Oracle collector. 
diff --git a/releasenotes/notes/otel-replace-logging-exporter-82062f8c865cb529.yaml b/releasenotes/notes/otel-replace-logging-exporter-82062f8c865cb529.yaml new file mode 100644 index 0000000000000..ff993fc5f45d1 --- /dev/null +++ b/releasenotes/notes/otel-replace-logging-exporter-82062f8c865cb529.yaml @@ -0,0 +1,16 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +upgrade: + - | + Removed the deprecated config option ``otlp_config.debug.loglevel`` in favor of ``otlp_config.debug.verbosity``: + * ``loglevel: debug`` maps to ``verbosity: detailed`` + * ``loglevel: info`` maps to ``verbosity: normal`` + * ``loglevel: warn/error`` maps to ``verbosity: basic`` + * ``loglevel: disabled`` maps to ``verbosity: none`` + diff --git a/releasenotes/notes/otlp-env-convention-b5c9da638d.yaml b/releasenotes/notes/otlp-env-convention-b5c9da638d.yaml new file mode 100644 index 0000000000000..be46b8baa5fa6 --- /dev/null +++ b/releasenotes/notes/otlp-env-convention-b5c9da638d.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + The OTLP ingest endpoint now maps the new OTel semantic convention `deployment.environment.name` to `env` diff --git a/releasenotes/notes/podman_file_logs_path_rootless_support-55a1af8f41cf94c4.yaml b/releasenotes/notes/podman_file_logs_path_rootless_support-55a1af8f41cf94c4.yaml new file mode 100644 index 0000000000000..17fab73bf7b3d --- /dev/null +++ b/releasenotes/notes/podman_file_logs_path_rootless_support-55a1af8f41cf94c4.yaml @@ -0,0 +1,4 @@ +--- +enhancements: + - | + Adds support for file log collection from Podman rootless containers when ``logs_config.use_podman_logs`` is set to ``true`` and ``podman_db_path`` is configured. diff --git a/releasenotes/notes/snmp-session-8e4b5a534a8c9837.yaml b/releasenotes/notes/snmp-session-8e4b5a534a8c9837.yaml new file mode 100644 index 0000000000000..39e25e452387c --- /dev/null +++ b/releasenotes/notes/snmp-session-8e4b5a534a8c9837.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fix a bug preventing SNMP V3 reconnection. diff --git a/releasenotes/notes/sql-obfuscator-prepared-statement-8705d7b7c00cf3ab.yaml b/releasenotes/notes/sql-obfuscator-prepared-statement-8705d7b7c00cf3ab.yaml new file mode 100644 index 0000000000000..2abf5792d829d --- /dev/null +++ b/releasenotes/notes/sql-obfuscator-prepared-statement-8705d7b7c00cf3ab.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. 
So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + APM: Fix obfuscation of SQL queries containing non-numeric prepared statement variables. diff --git a/releasenotes/notes/windows_sysprobe_socket_to_pipe-089738b8f07a56ad.yaml b/releasenotes/notes/windows_sysprobe_socket_to_pipe-089738b8f07a56ad.yaml new file mode 100644 index 0000000000000..a65ebb81a558f --- /dev/null +++ b/releasenotes/notes/windows_sysprobe_socket_to_pipe-089738b8f07a56ad.yaml @@ -0,0 +1,7 @@ +other: + - | + On Windows, the TCP socket transport mechanism for system probe + communications has been replaced with a named pipe. + This deprecates the system_probe_config.sysprobe_socket configuration + entry for Windows. + The new fixed named pipe path is \\.\pipe\dd_system_probe. diff --git a/rtloader/README.md b/rtloader/README.md index ebcf76a32dd4c..9b9b033f1a530 100644 --- a/rtloader/README.md +++ b/rtloader/README.md @@ -34,7 +34,7 @@ Most of the code used to extend the embedded interpreter is there. * C/C++ compiler * Python 2.7.x development packages -* Python 3.8.x development packages +* Python 3.12.x development packages * Cmake version 3.12 or above * Go compiler with `cgo` capabilities to run the tests diff --git a/tasks/__init__.py b/tasks/__init__.py index 477dff8c510e9..9b061ac2cdbcd 100644 --- a/tasks/__init__.py +++ b/tasks/__init__.py @@ -38,6 +38,7 @@ modules, msi, new_e2e_tests, + notes, notify, omnibus, oracle, @@ -176,6 +177,7 @@ ns.add_collection(gitlab_helpers, "gitlab") ns.add_collection(package) ns.add_collection(pipeline) +ns.add_collection(notes) ns.add_collection(notify) ns.add_collection(oracle) ns.add_collection(otel_agent) diff --git a/tasks/agent.py b/tasks/agent.py index c92f5d8399782..bf9ac70380e54 100644 --- a/tasks/agent.py +++ b/tasks/agent.py @@ -45,6 +45,22 @@ AgentFlavor.base: ["process-agent", "trace-agent", "security-agent"], } +if sys.platform == "win32": + # Our `ridk enable` toolchain puts Ruby's bin dir at the front of the PATH + # This dir contains `aws.rb` which will execute if we just call `aws`, + # so we need to be explicit about the executable extension/path + # NOTE: awscli seems to have a bug where running "aws.cmd", quoted, without a full path, + # causes it to fail due to not searching the PATH. + # NOTE: The full path to `aws.cmd` is likely to contain spaces, so if the full path is + # used instead, it must be quoted when passed to ctx.run. + # This unfortunately means that the quoting requirements are different if you use + # the full path or just the filename. + # aws.cmd -> awscli v1 from Python env + AWS_CMD = "aws.cmd" + # TODO: can we use `aws.exe` from AWSCLIv2? E2E expects v2.
+else: + AWS_CMD = "aws" + AGENT_CORECHECKS = [ "container", "containerd", @@ -76,6 +92,7 @@ "orchestrator_ecs", "cisco_sdwan", "network_path", + "service_discovery", ] WINDOWS_CORECHECKS = [ @@ -453,6 +470,8 @@ def hacky_dev_image_build( ctx, base_image=None, target_image="agent", + process_agent=False, + trace_agent=False, push=False, signed_pull=False, ): @@ -491,6 +510,18 @@ def hacky_dev_image_build( f'perl -0777 -pe \'s|{extracted_python_dir}(/opt/datadog-agent/embedded/lib/python\\d+\\.\\d+/../..)|substr $1."\\0"x length$&,0,length$&|e or die "pattern not found"\' -i dev/lib/libdatadog-agent-three.so' ) + copy_extra_agents = "" + if process_agent: + from tasks.process_agent import build as process_agent_build + + process_agent_build(ctx, bundle=False) + copy_extra_agents += "COPY bin/process-agent/process-agent /opt/datadog-agent/embedded/bin/process-agent\n" + if trace_agent: + from tasks.trace_agent import build as trace_agent_build + + trace_agent_build(ctx) + copy_extra_agents += "COPY bin/trace-agent/trace-agent /opt/datadog-agent/embedded/bin/trace-agent\n" + with tempfile.NamedTemporaryFile(mode='w') as dockerfile: dockerfile.write( f'''FROM ubuntu:latest AS src @@ -518,6 +549,13 @@ def hacky_dev_image_build( RUN go install github.com/go-delve/delve/cmd/dlv@latest +FROM {base_image} AS bash_completion + +RUN apt-get update && \ + apt-get install -y gawk + +RUN awk -i inplace '!/^#/ {{uncomment=0}} uncomment {{gsub(/^#/, "")}} /# enable bash completion/ {{uncomment=1}} {{print}}' /etc/bash.bashrc + FROM {base_image} ENV DEBIAN_FRONTEND=noninteractive @@ -528,10 +566,12 @@ def hacky_dev_image_build( ENV DELVE_PAGER=less COPY --from=dlv /go/bin/dlv /usr/local/bin/dlv +COPY --from=bash_completion /etc/bash.bashrc /etc/bash.bashrc COPY --from=src /usr/src/datadog-agent {os.getcwd()} COPY --from=bin /opt/datadog-agent/bin/agent/agent /opt/datadog-agent/bin/agent/agent COPY --from=bin /opt/datadog-agent/embedded/lib/libdatadog-agent-rtloader.so.0.1.0 /opt/datadog-agent/embedded/lib/libdatadog-agent-rtloader.so.0.1.0 COPY --from=bin /opt/datadog-agent/embedded/lib/libdatadog-agent-three.so /opt/datadog-agent/embedded/lib/libdatadog-agent-three.so +{copy_extra_agents} RUN agent completion bash > /usr/share/bash-completion/completions/agent RUN process-agent completion bash > /usr/share/bash-completion/completions/process-agent RUN security-agent completion bash > /usr/share/bash-completion/completions/security-agent @@ -792,7 +832,7 @@ def version( @task -def get_integrations_from_cache(ctx, python, bucket, branch, integrations_dir, target_dir, integrations, awscli="aws"): +def get_integrations_from_cache(ctx, python, bucket, branch, integrations_dir, target_dir, integrations): """ Get cached integration wheels for given integrations. 
python: Python version to retrieve integrations for @@ -801,7 +841,6 @@ def get_integrations_from_cache(ctx, python, bucket, branch, integrations_dir, t integrations_dir: directory with Git repository of integrations target_dir: local directory to put integration wheels to integrations: comma-separated names of the integrations to try to retrieve from cache - awscli: AWS CLI executable to call """ integrations_hashes = {} for integration in integrations.strip().split(","): @@ -819,13 +858,9 @@ def get_integrations_from_cache(ctx, python, bucket, branch, integrations_dir, t # On windows, maximum length of a command line call is 8191 characters, therefore # we do multiple syncs that fit within that limit (we use 8100 as a nice round number # and just to make sure we don't do any of-by-one errors that would break this). - # WINDOWS NOTES: on Windows, the awscli is usually in program files, so we have to wrap the - # executable in quotes; also we have to not put the * in quotes, as there's no - # expansion on it, unlike on Linux + # WINDOWS NOTES: we have to not put the * in quotes, as there's no expansion on it, unlike on Linux exclude_wildcard = "*" if platform.system().lower() == "windows" else "'*'" - sync_command_prefix = ( - f"\"{awscli}\" s3 sync s3://{bucket} {target_dir} --no-sign-request --exclude {exclude_wildcard}" - ) + sync_command_prefix = f"{AWS_CMD} s3 sync s3://{bucket} {target_dir} --no-sign-request --exclude {exclude_wildcard}" sync_commands = [[[sync_command_prefix], len(sync_command_prefix)]] for integration, hash in integrations_hashes.items(): include_arg = " --include " + CACHED_WHEEL_FULL_PATH_PATTERN.format( @@ -873,7 +908,7 @@ def get_integrations_from_cache(ctx, python, bucket, branch, integrations_dir, t @task -def upload_integration_to_cache(ctx, python, bucket, branch, integrations_dir, build_dir, integration, awscli="aws"): +def upload_integration_to_cache(ctx, python, bucket, branch, integrations_dir, build_dir, integration): """ Upload a built integration wheel for given integration. 
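The comment above about Windows's 8191-character command-line limit describes a simple batching scheme: keep appending `--include` arguments to the current sync command until the next one would push it past the budget, then start a new command from the same prefix. A rough standalone sketch of that idea follows; it does not reuse the task's exact data structures, and the wheel names are made up.

```python
# Budget kept below the 8191-character Windows command-line limit, as in the comment above.
MAX_LENGTH = 8100


def batch_commands(prefix: str, args: list[str], max_length: int = MAX_LENGTH) -> list[str]:
    """Group `args` onto as few command lines as possible without exceeding `max_length`."""
    commands = [prefix]
    for arg in args:
        if len(commands[-1]) + len(arg) > max_length:
            # The current command would get too long: start a new one from the prefix.
            commands.append(prefix)
        commands[-1] += arg
    return commands


if __name__ == "__main__":
    prefix = "aws s3 sync s3://wheels ./target --no-sign-request --exclude '*'"
    includes = [f" --include 'datadog_example_{i}-1.0.0-py3-none-any.whl'" for i in range(400)]
    batches = batch_commands(prefix, includes)
    print(f"{len(includes)} include arguments packed into {len(batches)} commands")
```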
python: Python version the integration is built for @@ -882,7 +917,6 @@ def upload_integration_to_cache(ctx, python, bucket, branch, integrations_dir, b integrations_dir: directory with Git repository of integrations build_dir: directory containing the built integration wheel integration: name of the integration being cached - awscli: AWS CLI executable to call """ matching_glob = os.path.join(build_dir, CACHED_WHEEL_FILENAME_PATTERN.format(integration=integration)) files_matched = glob.glob(matching_glob) @@ -904,8 +938,7 @@ def upload_integration_to_cache(ctx, python, bucket, branch, integrations_dir, b hash=hash, python_version=python, branch=branch ) + os.path.basename(wheel_path) print(f"Caching wheel {target_name}") - # NOTE: on Windows, the awscli is usually in program files, so we have the executable - ctx.run(f"\"{awscli}\" s3 cp {wheel_path} s3://{bucket}/{target_name} --acl public-read") + ctx.run(f"{AWS_CMD} s3 cp {wheel_path} s3://{bucket}/{target_name} --acl public-read") @task() diff --git a/tasks/dogstatsd.py b/tasks/dogstatsd.py index 874eb4974341d..6373af271bf0b 100644 --- a/tasks/dogstatsd.py +++ b/tasks/dogstatsd.py @@ -18,7 +18,7 @@ # constants DOGSTATSD_BIN_PATH = os.path.join(".", "bin", "dogstatsd") STATIC_BIN_PATH = os.path.join(".", "bin", "static") -MAX_BINARY_SIZE = 42 * 1024 +MAX_BINARY_SIZE = 44 * 1024 DOGSTATSD_TAG = "datadog/dogstatsd:master" diff --git a/tasks/gitlab_helpers.py b/tasks/gitlab_helpers.py index 0e347c61997b1..26aa05b84b157 100644 --- a/tasks/gitlab_helpers.py +++ b/tasks/gitlab_helpers.py @@ -8,13 +8,18 @@ import os import tempfile +import yaml from invoke import task +from invoke.exceptions import Exit +from tasks.kernel_matrix_testing.ci import get_kmt_dashboard_links from tasks.libs.ciproviders.gitlab_api import ( + compute_gitlab_ci_config_diff, get_all_gitlab_ci_configurations, get_gitlab_ci_configuration, get_gitlab_repo, print_gitlab_ci_configuration, + resolve_gitlab_ci_configuration, ) from tasks.libs.civisibility import ( get_pipeline_link_to_job_id, @@ -23,6 +28,7 @@ get_test_link_to_job_on_main, ) from tasks.libs.common.color import Color, color_message +from tasks.libs.common.utils import experimental @task @@ -62,7 +68,7 @@ def generate_ci_visibility_links(_ctx, output: str | None): def create_gitlab_annotations_report(ci_job_id: str, ci_job_name: str): - return { + links = { "CI Visibility": [ { "external_link": { @@ -91,6 +97,12 @@ def create_gitlab_annotations_report(ci_job_id: str, ci_job_name: str): ] } + kmt_links = get_kmt_dashboard_links() + if kmt_links: + links["KMT Dashboard"] = kmt_links + + return links + def print_gitlab_object(get_object, ctx, ids, repo='DataDog/datadog-agent', jq: str | None = None, jq_colors=True): """ @@ -136,6 +148,80 @@ def get_job(repo, id): print_gitlab_object(get_job, ctx, ids, repo, jq, jq_colors) +@task +@experimental( + 'This task takes into account only explicit dependencies (job `needs` / `dependencies`), implicit dependencies (stages order) are ignored' +) +def gen_config_subset(ctx, jobs, dry_run=False, force=False): + """ + Will generate a full .gitlab-ci.yml containing only the jobs necessary to run the target jobs `jobs`. + That is, the resulting pipeline will have `jobs` as last jobs to run. + + Warning: This doesn't take implicit dependencies into account (stages order), only explicit dependencies (job `needs` / `dependencies`). + + - dry_run: Print only the new configuration without writing it to the .gitlab-ci.yml file. 
+ - force: Force the update of the .gitlab-ci.yml file even if it has been modified. + + Example: + $ inv gitlab.gen-config-subset tests_deb-arm64-py3 + $ inv gitlab.gen-config-subset tests_rpm-arm64-py3,tests_deb-arm64-py3 --dry-run + """ + + jobs_to_keep = ['cancel-prev-pipelines', 'github_rate_limit_info', 'setup_agent_version'] + attributes_to_keep = 'stages', 'variables', 'default', 'workflow' + + # .gitlab-ci.yml should not be modified + if not force and not dry_run and ctx.run('git status -s .gitlab-ci.yml', hide='stdout').stdout.strip(): + raise Exit(color_message('The .gitlab-ci.yml file should not be modified as it will be overwritten', Color.RED)) + + config = resolve_gitlab_ci_configuration(ctx, '.gitlab-ci.yml') + + jobs = [j for j in jobs.split(',') if j] + jobs_to_keep + required = set() + + def add_dependencies(job): + nonlocal required, config + + if job in required: + return + required.add(job) + + dependencies = [] + if 'needs' in config[job]: + dependencies = config[job]['needs'] + if 'dependencies' in config[job]: + dependencies = config[job]['dependencies'] + + for dep in dependencies: + if isinstance(dep, dict): + dep = dep['job'] + add_dependencies(dep) + + # Make a DFS to find all the jobs that are needed to run the target jobs + for job in jobs: + add_dependencies(job) + + new_config = {job: config[job] for job in required} + + # Remove extends + for job in new_config.values(): + job.pop('extends', None) + + # Keep gitlab config + for attr in attributes_to_keep: + new_config[attr] = config[attr] + + content = yaml.safe_dump(new_config) + + if dry_run: + print(content) + else: + with open('.gitlab-ci.yml', 'w') as f: + f.write(content) + + print(color_message('The .gitlab-ci.yml file has been updated', Color.GREEN)) + + @task def print_job_trace(_, job_id, repo='DataDog/datadog-agent'): """ @@ -157,7 +243,7 @@ def print_ci( keep_special_objects: bool = False, expand_matrix: bool = False, git_ref: str | None = None, - ignore_errors: bool = False, + with_lint: bool = True, ): """ Prints the full gitlab ci configuration. @@ -166,7 +252,7 @@ def print_ci( - clean: Apply post processing to make output more readable (remove extends, flatten lists of lists...) - keep_special_objects: If True, do not filter out special objects (variables, stages etc.) - expand_matrix: Will expand matrix jobs into multiple jobs - - ignore_errors: If True, ignore errors in the gitlab configuration (only process yaml) + - with_lint: If False, do not lint the configuration - git_ref: If provided, use this git reference to fetch the configuration - NOTE: This requires a full api token access level to the repository """ @@ -177,7 +263,7 @@ def print_ci( clean=clean, expand_matrix=expand_matrix, git_ref=git_ref, - ignore_errors=ignore_errors, + with_lint=with_lint, keep_special_objects=keep_special_objects, ) @@ -196,3 +282,31 @@ def print_entry_points(ctx): print(len(entry_points), 'entry points:') for entry_point, config in entry_points.items(): print(f'- {color_message(entry_point, Color.BOLD)} ({len(config)} components)') + + +@task +def compute_gitlab_ci_config( + ctx, + before: str | None = None, + after: str | None = None, + before_file: str = 'before.gitlab-ci.yml', + after_file: str = 'after.gitlab-ci.yml', + diff_file: str = 'diff.gitlab-ci.yml', +): + """ + Will compute the Gitlab CI full configuration for the current commit and the base commit and will compute the diff between them. 
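Stepping back to `gen_config_subset` above: the traversal it relies on is a plain depth-first search over each job's `needs`/`dependencies`, where a dependency may be a bare job name or a `{job: ...}` mapping. A self-contained sketch of that traversal, using a made-up, already-resolved configuration:

```python
# Made-up resolved GitLab CI configuration: job name -> job definition.
# `needs` entries may be plain strings or {"job": ...} mappings.
config = {
    "setup_agent_version": {},
    "build_agent": {"needs": ["setup_agent_version"]},
    "tests_deb-arm64-py3": {"needs": [{"job": "build_agent"}]},
    "unrelated_job": {"needs": ["setup_agent_version"]},
}


def required_jobs(config: dict, targets: list[str]) -> set[str]:
    """Return every job needed, transitively, to run the target jobs."""
    required: set[str] = set()

    def visit(job: str) -> None:
        if job in required:
            return
        required.add(job)

        deps = []
        if "needs" in config[job]:
            deps = config[job]["needs"]
        if "dependencies" in config[job]:
            deps = config[job]["dependencies"]

        for dep in deps:
            # A dependency is either a bare job name or a mapping with a 'job' key.
            visit(dep["job"] if isinstance(dep, dict) else dep)

    for target in targets:
        visit(target)
    return required


print(sorted(required_jobs(config, ["tests_deb-arm64-py3"])))
# ['build_agent', 'setup_agent_version', 'tests_deb-arm64-py3']
```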
+ """ + + before_config, after_config, diff = compute_gitlab_ci_config_diff(ctx, before, after) + + print('Writing', before_file) + with open(before_file, 'w') as f: + f.write(yaml.safe_dump(before_config)) + + print('Writing', after_file) + with open(after_file, 'w') as f: + f.write(yaml.safe_dump(after_config)) + + print('Writing', diff_file) + with open(diff_file, 'w') as f: + f.write(yaml.safe_dump(diff.to_dict())) diff --git a/tasks/go.py b/tasks/go.py index c90f0dcdb174c..8036a313d86e7 100644 --- a/tasks/go.py +++ b/tasks/go.py @@ -450,7 +450,7 @@ def tidy(ctx): @task def check_go_version(ctx): go_version_output = ctx.run('go version') - # result is like "go version go1.22.6 linux/amd64" + # result is like "go version go1.22.7 linux/amd64" running_go_version = go_version_output.stdout.split(' ')[2] with open(".go-version") as f: diff --git a/tasks/gotest.py b/tasks/gotest.py index ede25f85d4f05..54015e3f7c35b 100644 --- a/tasks/gotest.py +++ b/tasks/gotest.py @@ -34,7 +34,7 @@ from tasks.libs.common.junit_upload_core import enrich_junitxml, produce_junit_tar from tasks.libs.common.utils import clean_nested_paths, get_build_flags, gitlab_section from tasks.libs.releasing.json import _get_release_json_value -from tasks.modules import DEFAULT_MODULES, GoModule +from tasks.modules import DEFAULT_MODULES, GoModule, get_module_by_path from tasks.test_core import ModuleTestResult, process_input_args, process_module_results, test_core from tasks.testwasher import TestWasher from tasks.trace_agent import integration_tests as trace_integration_tests @@ -454,20 +454,14 @@ def get_modified_packages(ctx, build_tags=None, lint=False) -> list[GoModule]: go_mod_modified_modules = set() for modified_file in modified_go_files: - match_precision = 0 - best_module_path = None - - # Since several modules can match the path we take only the most precise one - for module_path in DEFAULT_MODULES: - if module_path in modified_file and len(module_path) > match_precision: - match_precision = len(module_path) - best_module_path = module_path + best_module_path = Path(get_go_module(modified_file)) # Check if the package is in the target list of the module we want to test targeted = False assert best_module_path, f"No module found for {modified_file}" - targets = DEFAULT_MODULES[best_module_path].lint_targets if lint else DEFAULT_MODULES[best_module_path].targets + module = get_module_by_path(best_module_path) + targets = module.lint_targets if lint else module.targets for target in targets: if os.path.normpath(os.path.join(best_module_path, target)) in modified_file: @@ -482,7 +476,7 @@ def get_modified_packages(ctx, build_tags=None, lint=False) -> list[GoModule]: # If we modify the go.mod or go.sum we run the tests for the whole module if modified_file.endswith(".mod") or modified_file.endswith(".sum"): - modules_to_test[best_module_path] = DEFAULT_MODULES[best_module_path] + modules_to_test[best_module_path] = get_module_by_path(best_module_path) go_mod_modified_modules.add(best_module_path) continue @@ -748,7 +742,7 @@ def format_packages(ctx: Context, impacted_packages: set[str], build_tags: list[ modules_to_test = {} for package in packages: - module_path = get_go_module(package).replace("./", "") + module_path = get_go_module(package) # Check if the module is in the target list of the modules we want to test if module_path not in DEFAULT_MODULES or not DEFAULT_MODULES[module_path].condition(): @@ -821,7 +815,7 @@ def get_go_module(path): while path != '/': go_mod_path = os.path.join(path, 'go.mod') if 
os.path.isfile(go_mod_path): - return path + return os.path.relpath(path) path = os.path.dirname(path) raise Exception(f"No go.mod file found for package at {path}") diff --git a/tasks/install_tasks.py b/tasks/install_tasks.py index 17371da7ab3fe..880fda4146e52 100644 --- a/tasks/install_tasks.py +++ b/tasks/install_tasks.py @@ -8,6 +8,7 @@ from invoke import Context, Exit, task from tasks.libs.ciproviders.github_api import GithubAPI +from tasks.libs.common.color import Color, color_message from tasks.libs.common.go import download_go_dependencies from tasks.libs.common.retry import run_command_with_retry from tasks.libs.common.utils import bin_name, environ, gitlab_section @@ -41,6 +42,7 @@ @task def download_tools(ctx): """Download all Go tools for testing.""" + print(color_message("This command is deprecated, please use `install-tools` instead", Color.ORANGE)) with environ({'GO111MODULE': 'on'}): download_go_dependencies(ctx, paths=list(TOOLS.keys())) diff --git a/tasks/installer.py b/tasks/installer.py index db665f97546ea..9a446d8d8c039 100644 --- a/tasks/installer.py +++ b/tasks/installer.py @@ -3,11 +3,14 @@ """ import os +import shutil from invoke import task +from invoke.exceptions import Exit from tasks.build_tags import filter_incompatible_tags, get_build_tags, get_default_build_tags from tasks.libs.common.utils import REPO_PATH, bin_name, get_build_flags +from tasks.libs.releasing.version import get_version BIN_PATH = os.path.join(".", "bin", "installer") MAJOR_VERSION = '7' @@ -16,6 +19,7 @@ @task def build( ctx, + bootstraper=False, rebuild=False, race=False, install_path=None, @@ -24,6 +28,7 @@ def build( build_exclude=None, go_mod="mod", no_strip_binary=True, + no_cgo=False, ): """ Build the updater. @@ -43,13 +48,64 @@ def build( build_exclude = [] if build_exclude is None else build_exclude.split(",") build_tags = get_build_tags(build_include, build_exclude) + if bootstraper: + build_tags.append("bootstraper") strip_flags = "" if no_strip_binary else "-s -w" race_opt = "-race" if race else "" build_type = "-a" if rebuild else "" go_build_tags = " ".join(build_tags) - updater_bin = os.path.join(BIN_PATH, bin_name("installer")) + installer_bin_name = "installer" + if bootstraper: + installer_bin_name = "bootstraper" + installer_bin = os.path.join(BIN_PATH, bin_name(installer_bin_name)) + + if no_cgo: + env["CGO_ENABLED"] = "0" + cmd = f"go build -mod={go_mod} {race_opt} {build_type} -tags \"{go_build_tags}\" " - cmd += f"-o {updater_bin} -gcflags=\"{gcflags}\" -ldflags=\"{ldflags} {strip_flags}\" {REPO_PATH}/cmd/installer" + cmd += f"-o {installer_bin} -gcflags=\"{gcflags}\" -ldflags=\"{ldflags} {strip_flags}\" {REPO_PATH}/cmd/installer" ctx.run(cmd, env=env) + + +@task +def push_artifact( + ctx, + artifact, + registry, + version="", + tag="latest", + arch="amd64", +): + ''' + Pushes an OCI artifact to a registry. 
+ example: + inv -e installer.push-artifact --artifact "datadog-installer" --registry "docker.io/myregistry" --tag "latest" + ''' + if version == "": + version = get_version(ctx, include_git=True, url_safe=True, major_version='7', include_pipeline_id=True) + + # structural pattern matching is only available in Python 3.10+, which currently fails the `vulture` check + if artifact == 'datadog-agent': + image_name = 'agent-package' + elif artifact == 'datadog-installer': + image_name = 'installer-package' + else: + print("Unexpected artifact") + raise Exit(code=1) + + if os.name == 'nt': + target_os = 'windows' + else: + print('Unexpected os') + raise Exit(code=1) + + datadog_package = shutil.which('datadog-package') + if datadog_package is None: + print('datadog-package could not be found in path') + raise Exit(code=1) + + ctx.run( + f'{datadog_package} push {registry}/{image_name}:{tag} omnibus/pkg/{artifact}-{version}-1-{target_os}-{arch}.oci.tar' + ) diff --git a/tasks/kernel_matrix_testing/README.md b/tasks/kernel_matrix_testing/README.md index 5916fc9fe3516..dcc971585d015 100644 --- a/tasks/kernel_matrix_testing/README.md +++ b/tasks/kernel_matrix_testing/README.md @@ -414,3 +414,36 @@ This will show several tables, skipping the cases where all jobs/tests passed to - For each component (security-agent or system-probe) and vmset (e.g., in system-probe we have `only_tracersuite` and `no_tracersuite` test sets) it will show the jobs that failed and why (e.g., if the job failed due to an infra or a test failure). - Again, for each component and vmset, it will show which tests failed in a table showing in which distros/archs they failed (tests and distros that did not have any failures will not be shown). - For each job that failed due to infra reasons, it will show a summary with quick detection of possible boot causes (e.g., it will show if the VM did not reach the login prompt, or if it didn't get an IP address, etc). + +## Alien VMs +The KMT tasks provided here allow developers to run the system-probe build process and test setup exactly as in CI. As such, it can be useful to use these tasks to package system-probe and target VMs outside the purview of KMT. For this we can provide a profile describing these "alien" VMs, and the invoke tasks will +correctly package system-probe and share it with the provided VMs as if they had been launched by KMT. This is useful when a developer wants to use these tasks with local VMs launched with VMware, Parallels, etc., or remote VMs launched in EC2 or GCP. + +The format of the profile is a JSON list of objects, one per VM. For each VM the following information is required: +- ssh_key_path +- IP +- architecture +- name +- ssh_user + +An example of an alien VMs profile: +```json +[ + { + "ssh_key_path": "/home/user/.ssh/some-key.id_rsa", + "ip": "xxx.yyy.aaa.bbb", + "arch": "x86", + "name": "ubuntu-gcp", + "ssh_user": "ubuntu" + } +] +``` + +To target these alien VMs, use the `--alien-vms` flag to provide the path to this profile file.
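To make the profile shape above concrete, here is a hedged sketch of loading and sanity-checking such a file before handing it to the KMT tasks. `load_alien_profile` is an illustrative helper, not the actual invoke code; the required-field set follows the list above, with `ssh_user` treated as optional (falling back to root, as the infrastructure code does).

```python
import json
from pathlib import Path

# Fields each alien VM entry is expected to carry, per the profile format above.
REQUIRED_FIELDS = {"ssh_key_path", "ip", "arch", "name"}


def load_alien_profile(path: str) -> list[dict]:
    """Load an alien VMs profile and check that every entry has the required fields."""
    vms = json.loads(Path(path).read_text())
    for vm in vms:
        missing = REQUIRED_FIELDS - vm.keys()
        if missing:
            raise ValueError(f"VM entry {vm.get('name', '<unnamed>')!r} is missing fields: {sorted(missing)}")
        # ssh_user is optional in practice; tasks fall back to root when it is absent.
        vm.setdefault("ssh_user", "root")
    return vms


if __name__ == "__main__":
    for vm in load_alien_profile("/tmp/alien.profile"):
        print(f"{vm['name']}: {vm['ssh_user']}@{vm['ip']} ({vm['arch']})")
```

The invoke commands below show the same profile file being passed to `kmt.build` and `kmt.test` via `--alien-vms`.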
+``` +inv -e kmt.build --alien-vms=/tmp/alien.profile +``` + +``` +inv -e kmt.test --packages=./pkg/ebpf --run=TestLockRanges/Hashmap --alien-vms=./alien.profile +``` diff --git a/tasks/kernel_matrix_testing/ci.py b/tasks/kernel_matrix_testing/ci.py index a3cb7c3fc9b39..f6e83fcb6e571 100644 --- a/tasks/kernel_matrix_testing/ci.py +++ b/tasks/kernel_matrix_testing/ci.py @@ -1,10 +1,12 @@ from __future__ import annotations +import datetime import io import json import os import re import tarfile +import urllib.parse import xml.etree.ElementTree as ET from typing import TYPE_CHECKING, overload @@ -250,3 +252,47 @@ def get_all_jobs_for_pipeline(pipeline_id: int | str) -> tuple[list[KMTSetupEnvJ break return setup_jobs, test_jobs + + +def get_kmt_dashboard_links() -> None | list: + stage = os.environ.get("CI_JOB_STAGE") + pipeline = os.environ.get("CI_PIPELINE_ID") + branch = os.environ.get("CI_COMMIT_REF_NAME") + pipeline_start = os.environ.get("CI_PIPELINE_CREATED_AT") + + # Check we're running in Gitlab CI + if pipeline_start is None or branch is None or pipeline is None or stage is None: + return None + + # Check this is a KMT job + if "kernel_matrix_testing" not in stage: + return None + + try: + pipeline_start_date = datetime.datetime.fromisoformat(pipeline_start) + except Exception: + print(f"Error: Could not parse pipeline start date {pipeline_start}") + return None + + dashboard_end = pipeline_start_date + datetime.timedelta(hours=4) + + query_args = { + "fromUser": "false", + "refresh_mode": "paused", + "tpl_var_ci.pipeline.id[0]": pipeline, + "tpl_var_git-branch[0]": branch, + "from_ts": int(pipeline_start_date.timestamp()) * 1000, + "to_ts": int(dashboard_end.timestamp()) * 1000, + "live": "false", + } + + url = f"https://app.datadoghq.com/dashboard/zs9-uia-gsg?{urllib.parse.urlencode(query_args)}" + + return [ + { + "external_link": { + "label": "KMT: Pipeline dashboard", + "url": url, + } + } + ] diff --git a/tasks/kernel_matrix_testing/compiler.py b/tasks/kernel_matrix_testing/compiler.py index ff62f77ba1581..cc093a0cdc795 100644 --- a/tasks/kernel_matrix_testing/compiler.py +++ b/tasks/kernel_matrix_testing/compiler.py @@ -118,7 +118,7 @@ def exec(self, cmd: str, user="compiler", verbose=True, run_dir: PathOrStr | Non self.ensure_running() # Set FORCE_COLOR=1 so that termcolor works in the container - self.ctx.run( + return self.ctx.run( f"docker exec -u {user} -i -e FORCE_COLOR=1 {self.name} bash -c \"{cmd}\"", hide=(not verbose), warn=allow_fail, @@ -221,7 +221,7 @@ def prepare_for_cross_compile(self): # Extract into a .tar file and then use tar to extract the contents to avoid issues # with dpkg-deb not respecting symlinks. 
self.exec(f"dpkg-deb --fsys-tarfile {header_package_path} > {header_package_path}.tar", user="root") - self.exec(f"tar -h -xvf {header_package_path}.tar -C /", user="root") + self.exec(f"tar -h -xf {header_package_path}.tar -C /", user="root") # Install the corresponding arch compilers self.exec(f"apt update && apt install -y gcc-{target.gcc_arch.replace('_', '-')}-linux-gnu", user="root") diff --git a/tasks/kernel_matrix_testing/infra.py b/tasks/kernel_matrix_testing/infra.py index 3bb2c948c06a5..7a14f14211fe0 100644 --- a/tasks/kernel_matrix_testing/infra.py +++ b/tasks/kernel_matrix_testing/infra.py @@ -4,7 +4,7 @@ import json import os from pathlib import Path -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, TypedDict from invoke.context import Context @@ -100,7 +100,9 @@ def __init__( tag: str, vmset_tags: list[str], ssh_key_path: str | None, + arch: KMTArchNameOrLocal | None, instance: HostInstance, + user: str = "root", ): self.ip = ip self.name = domain_id @@ -108,6 +110,8 @@ def __init__( self.vmset_tags = vmset_tags self.ssh_key = ssh_key_path self.instance = instance + self.arch = arch + self.user = user def run_cmd(self, ctx: Context, cmd: str, allow_fail=False, verbose=False, timeout_sec=None): if timeout_sec is not None: @@ -115,15 +119,19 @@ def run_cmd(self, ctx: Context, cmd: str, allow_fail=False, verbose=False, timeo else: extra_opts = SSH_MULTIPLEX_OPTIONS - run = f"ssh {ssh_options_command(extra_opts)} -o IdentitiesOnly=yes -i {self.ssh_key} root@{self.ip} {{proxy_cmd}} '{cmd}'" + cmd = f"sudo bash -c \"{cmd}\"" if self.user != "root" else cmd + run = f"ssh {ssh_options_command(extra_opts)} -o IdentitiesOnly=yes -i {self.ssh_key} {self.user}@{self.ip} {{proxy_cmd}} '{cmd}'" return self.instance.runner.run_cmd(ctx, self.instance, run, allow_fail, verbose) - def _get_rsync_base(self, exclude: PathOrStr | None) -> str: + def _get_rsync_base(self, exclude: PathOrStr | None, verbose=False) -> str: exclude_arg = "" if exclude is not None: exclude_arg = f"--exclude '{exclude}'" - return f"rsync -e \"ssh {ssh_options_command({'IdentitiesOnly': 'yes'} | SSH_MULTIPLEX_OPTIONS)} {{proxy_cmd}} -i {self.ssh_key}\" -p -rt --exclude='.git*' {exclude_arg} --filter=':- .gitignore'" + verbose_arg = "-vP" if verbose else "" + sudo = "--rsync-path=\"sudo rsync\"" if self.user != "root" else "" + + return f"rsync {sudo} {verbose_arg} -e \"ssh {ssh_options_command({'IdentitiesOnly': 'yes'} | SSH_MULTIPLEX_OPTIONS)} {{proxy_cmd}} -i {self.ssh_key}\" -p -rt --exclude='.git*' {exclude_arg} --filter=':- .gitignore'" def copy( self, @@ -136,10 +144,14 @@ def copy( # Always ensure that the parent directory exists, rsync creates the rest self.run_cmd(ctx, f"mkdir -p {os.path.dirname(target)}", verbose=verbose) - run = self._get_rsync_base(exclude) + f" {source} root@{self.ip}:{target}" + info(f"[+] Copying (HOST: {source}) => (VM: {target})...") + + run = ( + self._get_rsync_base(exclude, verbose=ctx.config.run["echo"]) + f" {source} {self.user}@{self.ip}:{target}" + ) res = self.instance.runner.run_cmd(ctx, self.instance, run, False, verbose) if res: - info(f"[+] (HOST: {source}) => (VM: {target})") + info(f"[+] Copied (HOST: {source}) => (VM: {target})") return res @@ -151,7 +163,9 @@ def download( exclude: PathOrStr | None = None, verbose: bool = False, ): - run = self._get_rsync_base(exclude) + f" root@{self.ip}:{source} {target}" + run = ( + self._get_rsync_base(exclude, verbose=ctx.config.run["echo"]) + f" {self.user}@{self.ip}:{source} {target}" + ) res = 
self.instance.runner.run_cmd(ctx, self.instance, run, False, verbose) if res: info(f"[+] (VM: {source}) => (HOST: {target})") @@ -200,7 +214,7 @@ def build_infrastructure(stack: str, ssh_key_obj: SSHKey | None = None): # location in the local machine. instance.add_microvm( LibvirtDomain( - vm["ip"], vm["id"], vm["tag"], vm["vmset-tags"], os.fspath(get_kmt_os().ddvm_rsa), instance + vm["ip"], vm["id"], vm["tag"], vm["vmset-tags"], os.fspath(get_kmt_os().ddvm_rsa), arch, instance ) ) @@ -209,6 +223,43 @@ def build_infrastructure(stack: str, ssh_key_obj: SSHKey | None = None): return infra +class AlienVMInfo(TypedDict): + ip: str + ssh_key_path: str + name: str + arch: str + + +AlienInfrastructure = list[AlienVMInfo] + + +def build_alien_infrastructure(alien_vms: Path) -> dict[KMTArchNameOrLocal, HostInstance]: + with open(alien_vms) as f: + profile: AlienInfrastructure = json.load(f) + + # lets pretend all VMs are present locally even if they are not, because we just + # want to bypass the ssh proxying stuff when running commands and copying things + instance = HostInstance("local", "local", None) + for vm in profile: + ssh_user = "root" + if "ssh_user" in vm: + ssh_user = vm["ssh_user"] + instance.add_microvm( + LibvirtDomain( + vm["ip"], + "", + "", + [], + vm["ssh_key_path"], + vm["arch"], + instance, + ssh_user, + ) + ) + + return {"local": instance} + + def get_ssh_key_name(pubkey: Path) -> str | None: parts = pubkey.read_text().split() if len(parts) != 3: diff --git a/tasks/kernel_matrix_testing/vmconfig.py b/tasks/kernel_matrix_testing/vmconfig.py index ff4c4ead46b2a..9f24f44dc2d32 100644 --- a/tasks/kernel_matrix_testing/vmconfig.py +++ b/tasks/kernel_matrix_testing/vmconfig.py @@ -658,7 +658,7 @@ def gen_config_for_stack( ## get all possible (recipe, version, arch) combinations we can support. vmconfig_file = f"{get_kmt_os().stacks_dir}/{stack}/{VMCONFIG}" - if os.path.exists(vmconfig_file): + if os.path.exists(vmconfig_file) and not new: raise Exit( "Editing configuration is current not supported. Destroy the stack first to change the configuration." 
) diff --git a/tasks/kmt.py b/tasks/kmt.py index 486ce76fd1976..685e41e0370a6 100644 --- a/tasks/kmt.py +++ b/tasks/kmt.py @@ -30,6 +30,7 @@ SSH_OPTIONS, HostInstance, LibvirtDomain, + build_alien_infrastructure, build_infrastructure, ensure_key_in_ec2, get_ssh_agent_key_names, @@ -213,6 +214,7 @@ def gen_config_from_ci_pipeline( failed_packages: set[str] = set() failed_tests: set[str] = set() + successful_tests: set[str] = set() for test_job in test_jobs: if test_job.status == "failed" and job.component == vmconfig_template: vm_arch = test_job.arch @@ -222,14 +224,49 @@ def gen_config_from_ci_pipeline( results = test_job.get_test_results() for test, result in results.items(): if result is False: - package, test = test.split(":") + package, test = test.split(":", maxsplit=1) failed_tests.add(test) failed_packages.add(package) + elif result is True: # It can also be None if the test was skipped + successful_tests.add(test) vm_name = f"{vm_arch}-{test_job.distro}-distro" info(f"[+] Adding {vm_name} from failed job {test_job.name}") vms.add(vm_name) + # Simplify the failed tests so that we show only the parent tests with all failures below + # and not all child tests that failed + # Not at all the most efficient way to do this, but it works for the amount of data we have + # and is simple enough + successful_tests = successful_tests.difference(failed_tests) + coalesced_failed_tests: set[str] = set() + non_coalesced_failed_tests: set[str] = set() + for test in sorted(failed_tests): # Sort to have parent tests first + is_included = False + + # Check if this test is already included in some parent test + for already_coalesced in coalesced_failed_tests: + if test.startswith(already_coalesced): + is_included = True + break + else: + # If not, check if there is a subtest that succeeded. 
If there is not, + # we assume all children tests of this one failed and we can coalesce them + # into a single one + for succesful_test in successful_tests: + if succesful_test.startswith(test): + # There was a subtest of this one that succeeded, we cannot coalesce + # Add it to the non-coalesced list so that it's not checked as a parent + # and its children will be checked again + non_coalesced_failed_tests.add(test) + is_included = True + break + + if not is_included: + coalesced_failed_tests.add(test) + + failed_tests = non_coalesced_failed_tests | {f"{t}/.*" for t in coalesced_failed_tests} + if len(vms) == 0: raise Exit(f"No failed jobs found in pipeline {pipeline}") @@ -240,7 +277,7 @@ def gen_config_from_ci_pipeline( ctx, stack, ",".join(vms), "", init_stack, vcpu, memory, new, ci, arch, output_file, vmconfig_template, yes=yes ) info("[+] You can run the following command to execute only packages with failed tests") - print(f"inv kmt.test --packages=\"{' '.join(failed_packages)}\" --run='^{'|'.join(failed_tests)}$'") + print(f"inv kmt.test --packages=\"{','.join(failed_packages)}\" --run='^{'|'.join(failed_tests)}$'") @task @@ -374,11 +411,8 @@ def config_ssh_key(ctx: Context): ssh_keys = [] for f in ssh_key_files: - key_comment = get_ssh_key_name(f.with_suffix(".pub")) - if key_comment is None: - warn(f"[x] {f} does not have a valid key name, cannot be used") - else: - ssh_keys.append({'path': os.fspath(f), 'name': key_comment, 'aws_key_name': ''}) + key_name = get_ssh_key_name(f.with_suffix(".pub")) or f.name + ssh_keys.append({'path': os.fspath(f), 'name': key_name, 'aws_key_name': ''}) keys_str = "\n".join([f" - [{i + 1}] {key['name']} (path: {key['path']})" for i, key in enumerate(ssh_keys)]) result = ask(f"[?] Found these valid key files:\n{keys_str}\nChoose one of these files (1-{len(ssh_keys)}): ") @@ -389,6 +423,10 @@ def config_ssh_key(ctx: Context): except IndexError as e: # out of range raise Exit(f"Invalid choice {result}, must be a number between 1 and {len(ssh_keys)} (inclusive)") from e + info("[+] KMT needs this SSH key to be loaded in AWS so that it can be used to access the instances") + info( + "[+] If you haven't loaded it yet, go to https://dtdg.co/aws-sso-prod -> DataDog Sandbox -> EC2 -> Network & Security -> Key Pairs" + ) aws_key_name = ask( f"Enter the key name configured in AWS for this key (leave blank to set the same as the local key name '{ssh_key['name']}'): " ) @@ -458,7 +496,7 @@ def filter_target_domains(vms: str, infra: dict[KMTArchNameOrLocal, HostInstance def get_archs_in_domains(domains: Iterable[LibvirtDomain]) -> set[Arch]: archs: set[Arch] = set() for d in domains: - archs.add(Arch.from_str(d.instance.arch)) + archs.add(Arch.from_str(d.arch)) return archs @@ -530,6 +568,19 @@ def ninja_build_dependencies(ctx: Context, nw: NinjaWriter, kmt_paths: KMTPaths, inputs=[os.path.abspath(f)], ) + vm_metrics_files = glob("test/new-e2e/system-probe/vm-metrics/*.go") + nw.build( + rule="gobin", + pool="gobuild", + outputs=[os.path.join(kmt_paths.dependencies, "vm-metrics")], + implicit=vm_metrics_files, + variables={ + "go": go_path, + "chdir": "cd test/new-e2e/system-probe/vm-metrics", + "env": env_str, + }, + ) + test_json_files = glob("test/new-e2e/system-probe/test-json-review/*.go") nw.build( rule="gobin", @@ -607,10 +658,8 @@ def filter(x: Path): @task def kmt_secagent_prepare( ctx: Context, - vms: str | None = None, stack: str | None = None, arch: Arch | str = "local", - ssh_key: str | None = None, packages: str | None = None, verbose: bool = 
True, ci: bool = True, @@ -660,6 +709,7 @@ def prepare( ctx: Context, component: Component, vms: str | None = None, + alien_vms: str | None = None, stack: str | None = None, arch: str | Arch = "local", ssh_key: str | None = None, @@ -668,20 +718,39 @@ def prepare( ci=False, compile_only=False, ): - if not ci: - stack = check_and_get_stack(stack) - assert stacks.stack_exists( - stack - ), f"Stack {stack} does not exist. Please create with 'inv kmt.create-stack --stack='" - else: - stack = "ci" - arch_obj = Arch.from_str(arch) if arch_obj.kmt_arch not in KMT_SUPPORTED_ARCHS: raise Exit( f"Architecture {arch} (inferred {arch_obj}) is not supported. Supported architectures are amd64 and arm64" ) + if ci: + domains = None + stack = "ci" + return _prepare(ctx, stack, component, arch_obj, packages, verbose, ci, compile_only) + + if alien_vms is not None: + err_msg = f"no alient VMs discovered from provided profile {alien_vms}." + else: + err_msg = f"no vms found from list {vms}. Run `inv -e kmt.status` to see all VMs in current stack" + stack = get_kmt_or_alien_stack(ctx, stack, vms, alien_vms) + domains = get_target_domains(ctx, stack, ssh_key, arch_obj, vms, alien_vms) + assert len(domains) > 0, err_msg + + _prepare(ctx, stack, component, arch, packages, verbose, ci, compile_only, domains=domains) + + +def _prepare( + ctx: Context, + stack: str, + component: Component, + arch_obj: Arch, + packages=None, + verbose=True, + ci=False, + compile_only=False, + domains: list[LibvirtDomain] | None = None, +): if not ci: cc = get_compiler(ctx) @@ -697,7 +766,7 @@ def prepare( info(f"[+] Compiling artifacts for {arch_obj}, component = {component}") if component == "security-agent": if ci: - kmt_secagent_prepare(ctx, vms, stack, arch_obj, ssh_key, packages, verbose, ci) + kmt_secagent_prepare(ctx, stack, arch_obj, packages, verbose, ci) else: cc.exec( f"git config --global --add safe.directory {CONTAINER_AGENT_PATH} && inv {inv_echo} kmt.kmt-secagent-prepare --stack={stack} {pkgs} --arch={arch_obj.name}", @@ -752,14 +821,7 @@ def prepare( if ci or compile_only: return - if vms is None or vms == "": - raise Exit("No vms specified to sync with") - - ssh_key_obj = try_get_ssh_key(ctx, ssh_key) - infra = build_infrastructure(stack, ssh_key_obj) - domains = filter_target_domains(vms, infra, arch_obj) - - info(f"[+] Preparing VMs {vms} in stack {stack} for {arch}") + info(f"[+] Preparing VMs in stack {stack} for {arch_obj}") target_instances: list[HostInstance] = [] for d in domains: @@ -811,12 +873,12 @@ def build_run_config(run: str | None, packages: list[str]): return c -def build_target_packages(filter_packages): - all_packages = go_package_dirs(TEST_PACKAGES_LIST, [NPM_TAG, BPF_TAG]) - if filter_packages == []: +def build_target_packages(filter_packages: list[str], build_tags: list[str]): + all_packages = go_package_dirs(TEST_PACKAGES_LIST, build_tags) + if not filter_packages: return all_packages - filter_packages = [os.path.relpath(p) for p in go_package_dirs(filter_packages, [NPM_TAG, BPF_TAG])] + filter_packages = [os.path.relpath(p) for p in go_package_dirs(filter_packages, build_tags)] return [pkg for pkg in all_packages if os.path.relpath(pkg) in filter_packages] @@ -826,9 +888,8 @@ def build_object_files(ctx, fp, arch: Arch): ctx.run(f"ninja -d explain -f {fp}") -def compute_package_dependencies(ctx: Context, packages: list[str]) -> dict[str, set[str]]: +def compute_package_dependencies(ctx: Context, packages: list[str], build_tags: list[str]) -> dict[str, set[str]]: dd_pkg_name = 
"github.com/DataDog/datadog-agent/" - build_tags = get_sysprobe_buildtags(False, False) pkg_deps: dict[str, set[str]] = defaultdict(set) packages_list = " ".join(packages) @@ -862,7 +923,6 @@ def kmt_sysprobe_prepare( ctx: Context, arch: str | Arch, stack: str | None = None, - kernel_release: str | None = None, packages=None, extra_arguments: str | None = None, ci: bool = False, @@ -895,8 +955,9 @@ def kmt_sysprobe_prepare( build_object_files(ctx, f"{kmt_paths.arch_dir}/kmt-object-files.ninja", arch) info("[+] Computing Go dependencies for test packages...") - target_packages = build_target_packages(filter_pkgs) - pkg_deps = compute_package_dependencies(ctx, target_packages) + build_tags = get_sysprobe_buildtags(False, False) + target_packages = build_target_packages(filter_pkgs, build_tags) + pkg_deps = compute_package_dependencies(ctx, target_packages, build_tags) info("[+] Generating build instructions..") with open(nf_path, 'w') as ninja_file: @@ -915,6 +976,7 @@ def kmt_sysprobe_prepare( ninja_build_dependencies(ctx, nw, kmt_paths, go_path, arch) ninja_copy_ebpf_files(nw, "system-probe", kmt_paths, arch) + build_tags = get_sysprobe_buildtags(False, False) for pkg in target_packages: pkg_name = os.path.relpath(pkg, os.getcwd()) target_path = os.path.join(kmt_paths.sysprobe_tests, pkg_name) @@ -922,7 +984,7 @@ def kmt_sysprobe_prepare( variables = { "env": env_str, "go": go_path, - "build_tags": get_sysprobe_buildtags(False, False), + "build_tags": build_tags, } timeout = get_test_timeout(os.path.relpath(pkg, os.getcwd())) if timeout: @@ -954,9 +1016,9 @@ def kmt_sysprobe_prepare( rule="copyfiles", ) - # handle testutils and testdata seperately since they are + # handle testutils and testdata separately since they are # shared across packages - target_pkgs = build_target_packages([]) + target_pkgs = build_target_packages([], build_tags) for pkg in target_pkgs: target_path = os.path.join(kmt_paths.sysprobe_tests, os.path.relpath(pkg, os.getcwd())) @@ -1035,6 +1097,33 @@ def images_matching_ci(_: Context, domains: list[LibvirtDomain]): return len(not_matches) == 0 +def get_target_domains(ctx, stack, ssh_key, arch_obj, vms, alien_vms) -> list[LibvirtDomain]: + def _get_infrastructure(ctx, stack, ssh_key, vms, alien_vms): + if alien_vms: + alien_vms_path = Path(alien_vms) + if not alien_vms_path.exists(): + raise Exit(f"No alien VMs profile found @ {alien_vms_path}") + return build_alien_infrastructure(alien_vms_path) + + ssh_key_obj = try_get_ssh_key(ctx, ssh_key) + return build_infrastructure(stack, ssh_key_obj) + + if vms is None and alien_vms is None: + vms = ",".join(stacks.get_all_vms_in_stack(stack)) + info(f"[+] running tests on all vms in stack {stack}: vms={vms}") + + infra = _get_infrastructure(ctx, stack, ssh_key, vms, alien_vms) + if alien_vms is not None: + return infra["local"].microvms + + domains = filter_target_domains(vms, infra, arch_obj) + if not images_matching_ci(ctx, domains): + if ask("Some VMs do not match version in CI. Continue anyway [y/N]") != "y": + raise Exit("[-] Aborting due to version mismatch") + + return domains + + @task( help={ "vms": "Comma seperated list of vms to target when running tests. 
If None, run against all vms", @@ -1055,6 +1144,7 @@ def test( ctx: Context, component: str = "system-probe", vms: str | None = None, + alien_vms: str | None = None, stack: str | None = None, packages=None, run: str | None = None, @@ -1067,39 +1157,30 @@ def test( test_extra_arguments=None, test_extra_env=None, ): - stack = check_and_get_stack(stack) - assert stacks.stack_exists( - stack - ), f"Stack {stack} does not exist. Please create with 'inv kmt.create-stack --stack='" - - if vms is None: - vms = ",".join(stacks.get_all_vms_in_stack(stack)) - info(f"[+] Running tests on all VMs in stack {stack}: vms={vms}") - - ssh_key_obj = try_get_ssh_key(ctx, ssh_key) - infra = build_infrastructure(stack, ssh_key_obj) - domains = filter_target_domains(vms, infra) + stack = get_kmt_or_alien_stack(ctx, stack, vms, alien_vms) + domains = get_target_domains(ctx, stack, ssh_key, None, vms, alien_vms) used_archs = get_archs_in_domains(domains) - if not images_matching_ci(ctx, domains): - if ask("Some VMs do not match version in CI. Continue anyway [y/N]") != "y": - return + if alien_vms is not None: + err_msg = f"no alient VMs discovered from provided profile {alien_vms}." + else: + err_msg = f"no vms found from list {vms}. Run `inv -e kmt.status` to see all VMs in current stack" - assert len(domains) > 0, f"no vms found from list {vms}. Run `inv -e kmt.status` to see all VMs in current stack" + assert len(domains) > 0, err_msg info("[+] Detected architectures in target VMs: " + ", ".join(map(str, used_archs))) if not quick: for arch in used_archs: info(f"[+] Preparing {component} for {arch}") - prepare(ctx, component, stack=stack, vms=vms, packages=packages, ssh_key=ssh_key, arch=arch) + _prepare(ctx, stack, component, arch, packages=packages, verbose=verbose, domains=domains) if run is not None and packages is None: raise Exit("Package must be provided when specifying test") pkgs = [] if packages is not None: - pkgs = [os.path.relpath(p) for p in go_package_dirs(packages.split(","), [NPM_TAG, BPF_TAG])] + pkgs = [os.path.relpath(os.path.realpath(p)) for p in go_package_dirs(packages.split(","), [NPM_TAG, BPF_TAG])] if run is not None and len(pkgs) > 1: raise Exit("Only a single package can be specified when running specific tests") @@ -1162,6 +1243,22 @@ def build_layout(ctx, domains, layout: str, verbose: bool): d.run_cmd(ctx, cmd, verbose) +def get_kmt_or_alien_stack(ctx, stack, vms, alien_vms): + assert not (vms is not None and alien_vms is not None), "target VMs can be either KMT VMs or alien VMs, not both" + + if alien_vms is not None and vms is None: + stack = check_and_get_stack("alien-stack") + if not stacks.stack_exists(stack): + stacks.create_stack(ctx, stack) + return stack + + stack = check_and_get_stack(stack) + assert stacks.stack_exists( + stack + ), f"Stack {stack} does not exist. Please create with 'inv kmt.create-stack --stack='" + return stack + + @task( help={ "vms": "Comma seperated list of vms to target when running tests", @@ -1176,6 +1273,7 @@ def build_layout(ctx, domains, layout: str, verbose: bool): def build( ctx: Context, vms: str | None = None, + alien_vms: str | None = None, stack: str | None = None, ssh_key: str | None = None, verbose=True, @@ -1185,10 +1283,7 @@ def build( compile_only=False, override_agent=False, ): - stack = check_and_get_stack(stack) - assert stacks.stack_exists( - stack - ), f"Stack {stack} does not exist. 
Please create with 'inv kmt.create-stack --stack='" + stack = get_kmt_or_alien_stack(ctx, stack, vms, alien_vms) if arch is None: arch = "local" @@ -1212,26 +1307,30 @@ def build( if compile_only: return - if vms is None: - vms = ",".join(stacks.get_all_vms_in_stack(stack)) - assert os.path.exists(layout), f"File {layout} does not exist" - ssh_key_obj = try_get_ssh_key(ctx, ssh_key) - infra = build_infrastructure(stack, ssh_key_obj) - domains = filter_target_domains(vms, infra, arch_obj) + domains = get_target_domains(ctx, stack, ssh_key, arch_obj, vms, alien_vms) + if alien_vms is not None: + err_msg = f"no alient VMs discovered from provided profile {alien_vms}." + else: + err_msg = f"no vms found from list {vms}. Run `inv -e kmt.status` to see all VMs in current stack" - if not images_matching_ci(ctx, domains): - if ask("Some VMs do not match version in CI. Continue anyway [y/N]") != "y": - return + assert len(domains) > 0, err_msg - assert len(domains) > 0, f"no vms found from list {vms}. Run `inv -e kmt.status` to see all VMs in current stack" + llc_path = paths.tools / "llc-bpf" + clang_path = paths.tools / "clang-bpf" + setup_runtime_clang(ctx, arch_obj, paths.tools) build_layout(ctx, domains, layout, verbose) for d in domains: + # Copy embedded tools, make them + embedded_remote_path = Path("/opt/datadog-agent/embedded/bin") + d.copy(ctx, llc_path, embedded_remote_path / llc_path.name, verbose=verbose) + d.copy(ctx, clang_path, embedded_remote_path / clang_path.name, verbose=verbose) + if override_agent: d.run_cmd(ctx, f"[ -f /opt/datadog-agent/embedded/bin/{component} ]", verbose=False) - d.copy(ctx, f"./bin/{component}/{component}", "/opt/datadog-agent/embedded/bin/{component}") + d.copy(ctx, f"./bin/{component}/{component}", f"/opt/datadog-agent/embedded/bin/{component}") else: d.copy(ctx, f"./bin/{component}", "/root/") @@ -1285,7 +1384,7 @@ def ssh_config( # Ensure correct permissions of the ddvm_rsa file if we're using # it to connect to VMs. 
This attribute change doesn't seem to be tracked # in git correctly - ctx.run(f"chmod 600 {ddvm_rsa}") + ctx.run(f"chmod 600 {ddvm_rsa}", echo=False) for stack_dir in stacks_dir.iterdir(): if not stack_dir.is_dir(): @@ -1848,6 +1947,7 @@ def show_last_test_results(ctx: Context, stack: str | None = None): vm_list: list[str] = [] total_by_vm: dict[str, tuple[int, int, int, int]] = defaultdict(lambda: (0, 0, 0, 0)) sum_failures = 0 + sum_tests = 0 for vm_folder in paths.test_results.iterdir(): if not vm_folder.is_dir(): @@ -1883,6 +1983,7 @@ def show_last_test_results(ctx: Context, stack: str | None = None): for testresults in tests.values(): if len(testresults) == 1: result = next(iter(testresults)) + sum_tests += 1 if result == "failed": failures += 1 sum_failures += 1 @@ -1913,8 +2014,16 @@ def _color_result(result: tuple[int, int, int, int]) -> str: table.append(["Total"] + [_color_result(total_by_vm[vm]) for vm in vm_list]) - print(tabulate(table, headers=["Package"] + vm_list)) - print("\nLegend: Successes/Successes on retry/Failures/Skipped") + print(tabulate(table, headers=["Package"] + vm_list) + "\n") + + if sum_tests == 0: + warn("WARN: No test runs") + elif sum_failures > 0: + error("ERROR: Found failed tests") + else: + info("SUCCESS: All tests passed") + + print("Legend: Successes/Successes on retry/Failures/Skipped") if sum_failures: sys.exit(1) @@ -2059,6 +2168,7 @@ def install_ddagent( ctx: Context, api_key: str, vms: str | None = None, + alien_vms: str | None = None, stack: str | None = None, ssh_key: str | None = None, verbose=True, @@ -2067,24 +2177,20 @@ def install_ddagent( datadog_yaml: str | None = None, layout: str | None = None, ): - stack = check_and_get_stack(stack) - assert stacks.stack_exists( - stack - ), f"Stack {stack} does not exist. Please create with 'inv kmt.create-stack --stack='" + stack = get_kmt_or_alien_stack(ctx, stack, vms, alien_vms) if arch is None: arch = "local" arch_obj = Arch.from_str(arch) - if vms is None: - vms = ",".join(stacks.get_all_vms_in_stack(stack)) - - ssh_key_obj = try_get_ssh_key(ctx, ssh_key) - infra = build_infrastructure(stack, ssh_key_obj) - domains = filter_target_domains(vms, infra, arch_obj) + domains = get_target_domains(ctx, stack, ssh_key, arch_obj, vms, alien_vms) + if alien_vms is not None: + err_msg = f"no alient VMs discovered from provided profile {alien_vms}." + else: + err_msg = f"no vms found from list {vms}. Run `inv -e kmt.status` to see all VMs in current stack" - assert len(domains) > 0, f"no vms found from list {vms}. 
Run `inv -e kmt.status` to see all VMs in current stack" + assert len(domains) > 0, err_msg if version is not None: check_version(version) diff --git a/tasks/libs/ciproviders/gitlab_api.py b/tasks/libs/ciproviders/gitlab_api.py index 8c940db6479af..3a416b92574f4 100644 --- a/tasks/libs/ciproviders/gitlab_api.py +++ b/tasks/libs/ciproviders/gitlab_api.py @@ -18,6 +18,7 @@ from invoke.exceptions import Exit from tasks.libs.common.color import Color, color_message +from tasks.libs.common.constants import DEFAULT_BRANCH from tasks.libs.common.git import get_common_ancestor, get_current_branch from tasks.libs.common.utils import retry_function @@ -116,20 +117,64 @@ def refresh_pipeline(pipeline: ProjectPipeline): class GitlabCIDiff: - def __init__(self, before: dict, after: dict) -> None: + def __init__( + self, + before: dict | None = None, + after: dict | None = None, + added: set[str] | None = None, + removed: set[str] | None = None, + modified: set[str] | None = None, + renamed: set[tuple[str, str]] | None = None, + modified_diffs: dict[str, list[str]] | None = None, + added_contents: dict[str, str] | None = None, + ) -> None: """ Used to display job diffs between two gitlab ci configurations """ - self.before = before - self.after = after - self.added_contents = {} - self.modified_diffs = {} - - self.make_diff() + self.before = before or {} + self.after = after or {} + self.added = added or set() + self.removed = removed or set() + self.modified = modified or set() + self.renamed = renamed or set() + self.modified_diffs = modified_diffs or {} + self.added_contents = added_contents or {} def __bool__(self) -> bool: return bool(self.added or self.removed or self.modified or self.renamed) + def to_dict(self) -> dict: + return { + 'before': self.before, + 'after': self.after, + 'added': self.added, + 'removed': self.removed, + 'modified': self.modified, + 'renamed': list(self.renamed), + 'modied_diffs': self.modified_diffs, + 'added_contents': self.added_contents, + } + + @staticmethod + def from_dict(data: dict) -> GitlabCIDiff: + return GitlabCIDiff( + before=data['before'], + after=data['after'], + added=set(data['added']), + removed=set(data['removed']), + modified=set(data['modified']), + renamed=set(data['renamed']), + modified_diffs=data['modied_diffs'], + added_contents=data['added_contents'], + ) + + @staticmethod + def from_contents(before: dict | None = None, after: dict | None = None) -> GitlabCIDiff: + diff = GitlabCIDiff(before, after) + diff.make_diff() + + return diff + def make_diff(self): """ Compute the diff between the two gitlab ci configurations @@ -294,7 +339,7 @@ def str_note() -> list[str]: if only_summary: if not cli: - res.append(':warning: Diff too large to display on Github') + res.append(':warning: Diff too large to display on Github.') else: if self.modified: wrap = len(self.modified) > max_detailed_jobs @@ -336,6 +381,27 @@ def str_note() -> list[str]: return '\n'.join(res) + def iter_jobs(self, added=True, modified=True, removed=False): + """ + Will iterate over all jobs in all files for the given states + + Returns a tuple of (job_name, contents, state) + + Note that the contents of the job is the contents after modification if modified or before removal if removed + """ + + if added: + for job in self.added: + yield job, self.after[job], 'added' + + if modified: + for job in self.modified: + yield job, self.after[job], 'modified' + + if removed: + for job in self.removed: + yield job, self.before[job], 'removed' + class MultiGitlabCIDiff: @dataclass @@ 
-346,25 +412,59 @@ class MultiDiff: is_added: bool is_removed: bool - def __init__(self, before: dict[str, dict], after: dict[str, dict]) -> None: + def to_dict(self) -> dict: + return { + 'entry_point': self.entry_point, + 'diff': self.diff.to_dict(), + 'is_added': self.is_added, + 'is_removed': self.is_removed, + } + + @staticmethod + def from_dict(data: dict) -> MultiGitlabCIDiff.MultiDiff: + return MultiGitlabCIDiff.MultiDiff( + data['entry_point'], GitlabCIDiff.from_dict(data['diff']), data['is_added'], data['is_removed'] + ) + + def __init__( + self, + before: dict[str, dict] | None = None, + after: dict[str, dict] | None = None, + diffs: list[MultiGitlabCIDiff.MultiDiff] | None = None, + ) -> None: """ Used to display job diffs between two full gitlab ci configurations (multiple entry points) - before/after: Dict of [entry point] -> ([job name] -> job content) """ - self.before = dict(before) - self.after = dict(after) - - self.diffs: list[MultiGitlabCIDiff.MultiDiff] = [] - - self.make_diff() + self.before = before + self.after = after + self.diffs = diffs or [] def __bool__(self) -> bool: return bool(self.diffs) + def to_dict(self) -> dict: + return {'before': self.before, 'after': self.after, 'diffs': [diff.to_dict() for diff in self.diffs]} + + @staticmethod + def from_dict(data: dict) -> MultiGitlabCIDiff: + return MultiGitlabCIDiff( + data['before'], data['after'], [MultiGitlabCIDiff.MultiDiff.from_dict(d) for d in data['diffs']] + ) + + @staticmethod + def from_contents(before: dict[str, dict] | None = None, after: dict[str, dict] | None = None) -> MultiGitlabCIDiff: + diff = MultiGitlabCIDiff(before, after) + diff.make_diff() + + return diff + def make_diff(self): + self.diffs = [] + for entry_point in set(list(self.before) + list(self.after)): - diff = GitlabCIDiff(self.before.get(entry_point, {}), self.after.get(entry_point, {})) + diff = GitlabCIDiff.from_contents(self.before.get(entry_point, {}), self.after.get(entry_point, {})) # Diff for this entry point, add it to the list if diff: @@ -382,7 +482,7 @@ def display(self, cli: bool = True, job_url: str = None, **kwargs) -> str: return '' if len(self.diffs) == 1: - return self.diffs[0].diff.display(cli, **kwargs) + return self.diffs[0].diff.display(cli, job_url=job_url, **kwargs) def str_entry(diff: MultiGitlabCIDiff.MultiDiff) -> str: if cli: @@ -428,6 +528,19 @@ def str_entry_end() -> list[str]: return '\n'.join(res) + def iter_jobs(self, added=True, modified=True, removed=False): + """ + Will iterate over all jobs in all files for the given states + + Returns a tuple of (entry_point, job_name, contents, state) + + Note that the contents is the contents after modification or before removal + """ + + for diff in self.diffs: + for job, contents, state in diff.diff.iter_jobs(added=added, modified=modified, removed=removed): + yield diff.entry_point, job, contents, state + class ReferenceTag(yaml.YAMLObject): """ @@ -607,78 +720,102 @@ def print_gitlab_ci_configuration(yml: dict, sort_jobs: bool): yaml.safe_dump({job: content}, sys.stdout, default_flow_style=False, sort_keys=True, indent=2) +def test_gitlab_configuration(ctx, entry_point, input_config, context=None): + agent = get_gitlab_repo() + # Update config and lint it + config = generate_gitlab_full_configuration(ctx, entry_point, context=context, input_config=input_config) + res = agent.ci_lint.create({"content": config, "dry_run": True, "include_jobs": True}) + status = color_message("valid", "green") if res.valid else color_message("invalid", "red") + + 
print(f"{color_message(entry_point, Color.BOLD)} config is {status}") + if len(res.warnings) > 0: + print( + f'{color_message("warning", Color.ORANGE)}: {color_message(entry_point, Color.BOLD)}: {res.warnings})', + file=sys.stderr, + ) + if not res.valid: + print( + f'{color_message("error", Color.RED)}: {color_message(entry_point, Color.BOLD)}: {res.errors})', + file=sys.stderr, + ) + raise Exit(code=1) + + def get_all_gitlab_ci_configurations( ctx, input_file: str = '.gitlab-ci.yml', filter_configs: bool = False, clean_configs: bool = False, - ignore_errors: bool = False, + with_lint: bool = True, git_ref: str | None = None, ) -> dict[str, dict]: """ - Returns all gitlab-ci configurations from each entry points (.gitlab-ci.yml and files that are triggered) + Returns all gitlab-ci configurations from each configuration file (.gitlab-ci.yml and files called with the `trigger` keyword) - filter_configs: Whether to apply post process filtering to the configurations (get only jobs...) - clean_configs: Whether to apply post process cleaning to the configurations (remove extends, flatten lists of lists...) - ignore_errors: Ignore gitlab lint errors - git_ref: If provided, use this git reference to fetch the configuration """ - # entry_points[input_file] -> parsed config - entry_points: dict[str, dict] = {} + # configurations[input_file] -> parsed config + configurations: dict[str, dict] = {} - def get_triggers(node): - """ - Get all trigger local files - """ - if isinstance(node, str): - return [node] - elif isinstance(node, dict): - return [node['local']] if 'local' in node else [] - elif isinstance(node, list): - res = [] - for n in node: - res.extend(get_triggers(n)) + # Traverse all gitlab-ci configurations + get_ci_configurations(input_file, configurations=configurations, ctx=ctx, with_lint=with_lint, git_ref=git_ref) + # Post process + for file_name, config in configurations.items(): + if filter_configs: + config = filter_gitlab_ci_configuration(config) - return res + if clean_configs: + config = clean_gitlab_ci_configuration(config) - def get_entry_points(input_file): - """ - DFS to get all entry points from the input file - """ - if input_file in entry_points: - return + configurations[file_name] = config - # Read and parse the configuration from this entry point - config = get_full_gitlab_ci_configuration(ctx, input_file, ignore_errors=ignore_errors, git_ref=git_ref) - entry_points[input_file] = config + return configurations - # Add entry points from triggers - for job in config.values(): - if 'trigger' in job and 'include' in job['trigger']: - for trigger in get_triggers(job['trigger']['include']): - get_entry_points(trigger) - # Find all entry points - get_entry_points(input_file) +def get_ci_configurations(input_file, configurations, ctx, with_lint, git_ref): + """ + DFS to get all distinct configurations from input files + """ + if input_file in configurations: + return - # Post process - for entry_point, config in entry_points.items(): - if filter_configs: - config = filter_gitlab_ci_configuration(config) + # Read and parse the configuration from this input_file + config = resolve_gitlab_ci_configuration(ctx, input_file, with_lint=with_lint, git_ref=git_ref) + configurations[input_file] = config + + # Search and add configurations called by the trigger keyword + for job in config.values(): + if 'trigger' in job and 'include' in job['trigger']: + for trigger in get_trigger_filenames(job['trigger']['include']): + get_ci_configurations( + trigger, configurations=configurations, 
ctx=ctx, with_lint=with_lint, git_ref=git_ref + ) - if clean_configs: - config = clean_gitlab_ci_configuration(config) - entry_points[entry_point] = config +def get_trigger_filenames(node): + """ + Get all trigger downstream pipelines defined by the `trigger` key in the gitlab-ci configuration + """ + if isinstance(node, str): + return [node] + elif isinstance(node, dict): + return [node['local']] if 'local' in node else [] + elif isinstance(node, list): + res = [] + for n in node: + res.extend(get_trigger_filenames(n)) - return entry_points + return res -def get_full_gitlab_ci_configuration( +def resolve_gitlab_ci_configuration( ctx, input_file: str = '.gitlab-ci.yml', return_dict: bool = True, - ignore_errors: bool = False, + with_lint: bool = True, git_ref: str | None = None, input_config: dict | None = None, ) -> str | dict: @@ -695,23 +832,26 @@ def get_full_gitlab_ci_configuration( else: concat_config = input_config - agent = get_gitlab_repo() - res = agent.ci_lint.create({"content": yaml.safe_dump(concat_config), "dry_run": True, "include_jobs": True}) + if with_lint: + agent = get_gitlab_repo() + res = agent.ci_lint.create({"content": yaml.safe_dump(concat_config), "dry_run": True, "include_jobs": True}) - if not ignore_errors and not res.valid: - errors = '; '.join(res.errors) - raise RuntimeError(f"{color_message('Invalid configuration', Color.RED)}: {errors}") + if not res.valid: + errors = '; '.join(res.errors) + raise RuntimeError(f"{color_message('Invalid configuration', Color.RED)}: {errors}") - if return_dict: - return yaml.safe_load(res.merged_yaml) + if return_dict: + return yaml.safe_load(res.merged_yaml) + else: + return res.merged_yaml else: - return res.merged_yaml + return concat_config def get_gitlab_ci_configuration( ctx, input_file: str = '.gitlab-ci.yml', - ignore_errors: bool = False, + with_lint: bool = True, job: str | None = None, keep_special_objects: bool = False, clean: bool = True, @@ -726,7 +866,7 @@ def get_gitlab_ci_configuration( """ # Make full configuration - yml = get_full_gitlab_ci_configuration(ctx, input_file, ignore_errors=ignore_errors, git_ref=git_ref) + yml = resolve_gitlab_ci_configuration(ctx, input_file, with_lint=with_lint, git_ref=git_ref) # Filter yml = filter_gitlab_ci_configuration(yml, job, keep_special_objects=keep_special_objects) @@ -756,7 +896,7 @@ def generate_gitlab_full_configuration( - input_config: If not None, will use this config instead of parsing existing yaml file at `input_file` """ if apply_postprocessing: - full_configuration = get_full_gitlab_ci_configuration(ctx, input_file, input_config=input_config) + full_configuration = resolve_gitlab_ci_configuration(ctx, input_file, input_config=input_config) elif input_config: full_configuration = deepcopy(input_config) else: @@ -764,7 +904,7 @@ def generate_gitlab_full_configuration( # Override some variables with a dedicated context if context: - full_configuration['variables'] = full_configuration.get('variables', {}).update(context) + full_configuration.get('variables', {}).update(context) if compare_to: for value in full_configuration.values(): if ( @@ -896,6 +1036,15 @@ def get_preset_contexts(required_tests): ("CI_PIPELINE_SOURCE", ["pipeline"]), # ["trigger", "pipeline", "schedule"] ("DDR_WORKFLOW_ID", ["true"]), ] + integrations_core_contexts = [ + ("RELEASE_VERSION_6", ["nightly"]), + ("RELEASE_VERSION_7", ["nightly-a7"]), + ("BUCKET_BRANCH", ["dev"]), + ("DEPLOY_AGENT", ["false"]), + ("INTEGRATIONS_CORE_VERSION", ["foo/bar"]), + ("RUN_KITCHEN_TESTS", ["false"]), 
+ ("RUN_E2E_TESTS", ["off"]), + ] all_contexts = [] for test in required_tests: if test in ["all", "main"]: @@ -906,6 +1055,8 @@ def get_preset_contexts(required_tests): generate_contexts(mq_contexts, [], all_contexts) if test in ["all", "conductor"]: generate_contexts(conductor_contexts, [], all_contexts) + if test in ["all", "integrations"]: + generate_contexts(integrations_core_contexts, [], all_contexts) return all_contexts @@ -983,7 +1134,7 @@ def gitlab_configuration_is_modified(ctx): print(f"Found a gitlab configuration file: {new_file}") else: in_config = False - if in_config and line.startswith("@@"): + if in_config and line.startswith("@@") and os.path.exists(new_file): lines = changed_lines.match(line) start = int(lines.group(1)) with open(new_file) as f: @@ -1010,3 +1161,27 @@ def gitlab_configuration_is_modified(ctx): return True return False + + +def compute_gitlab_ci_config_diff(ctx, before: str, after: str): + """ + Computes the full configs and the diff between two git references. + The "after reference" is compared to the Lowest Common Ancestor (LCA) commit of "before reference" and "after reference". + """ + + before_name = before or "merge base" + after_name = after or "local files" + + # The before commit is the LCA commit between before and after + before = before or DEFAULT_BRANCH + before = get_common_ancestor(ctx, before, after or "HEAD") + + print(f'Getting after changes config ({color_message(after_name, Color.BOLD)})') + after_config = get_all_gitlab_ci_configurations(ctx, git_ref=after, clean_configs=True) + + print(f'Getting before changes config ({color_message(before_name, Color.BOLD)})') + before_config = get_all_gitlab_ci_configurations(ctx, git_ref=before, clean_configs=True) + + diff = MultiGitlabCIDiff.from_contents(before_config, after_config) + + return before_config, after_config, diff diff --git a/tasks/libs/common/git.py b/tasks/libs/common/git.py index c16a5c42e0cbe..8363f08ad9496 100644 --- a/tasks/libs/common/git.py +++ b/tasks/libs/common/git.py @@ -54,8 +54,8 @@ def get_current_branch(ctx) -> str: return ctx.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip() -def get_common_ancestor(ctx, branch) -> str: - return ctx.run(f"git merge-base {branch} main", hide=True).stdout.strip() +def get_common_ancestor(ctx, branch, base=DEFAULT_BRANCH) -> str: + return ctx.run(f"git merge-base {branch} {base}", hide=True).stdout.strip() def check_uncommitted_changes(ctx): @@ -167,7 +167,12 @@ def get_last_commit(ctx, repo, branch): ) -def get_last_tag(ctx, repo, pattern): +def get_last_release_tag(ctx, repo, pattern): + import re + from functools import cmp_to_key + + import semver + tags = ctx.run( rf'git ls-remote -t https://github.com/DataDog/{repo} "{pattern}"', hide=True, @@ -180,9 +185,24 @@ def get_last_tag(ctx, repo, pattern): ), code=1, ) - last_tag = tags.splitlines()[-1] + + release_pattern = re.compile(r'.*7\.[0-9]+\.[0-9]+(-rc.*|-devel.*)?$') + tags_without_suffix = [ + line for line in tags.splitlines() if not line.endswith("^{}") and release_pattern.match(line) + ] + last_tag = max(tags_without_suffix, key=lambda x: cmp_to_key(semver.compare)(x.split('/')[-1])) last_tag_commit, last_tag_name = last_tag.split() - if last_tag_name.endswith("^{}"): - last_tag_name = last_tag_name.removesuffix("^{}") + tags_with_suffix = [line for line in tags.splitlines() if line.endswith("^{}") and release_pattern.match(line)] + if tags_with_suffix: + last_tag_with_suffix = max( + tags_with_suffix, key=lambda x: 
cmp_to_key(semver.compare)(x.split('/')[-1].removesuffix("^{}")) + ) + last_tag_commit_with_suffix, last_tag_name_with_suffix = last_tag_with_suffix.split() + if ( + semver.compare(last_tag_name_with_suffix.split('/')[-1].removesuffix("^{}"), last_tag_name.split("/")[-1]) + >= 0 + ): + last_tag_commit = last_tag_commit_with_suffix + last_tag_name = last_tag_name_with_suffix.removesuffix("^{}") last_tag_name = last_tag_name.removeprefix("refs/tags/") return last_tag_commit, last_tag_name diff --git a/tasks/libs/common/go.py b/tasks/libs/common/go.py index 6ee7f6bf0834d..61b5ecdb34784 100644 --- a/tasks/libs/common/go.py +++ b/tasks/libs/common/go.py @@ -11,8 +11,10 @@ def download_go_dependencies(ctx: Context, paths: list[str], verbose: bool = False, max_retry: int = 3): print("downloading dependencies") - with timed("go mod download"): + with timed("go mod download && go mod tidy"): verbosity = ' -x' if verbose else '' for path in paths: with ctx.cd(path): - run_command_with_retry(ctx, f"go mod download{verbosity}", max_retry=max_retry) + run_command_with_retry( + ctx, f"go mod download{verbosity} && go mod tidy{verbosity}", max_retry=max_retry + ) diff --git a/tasks/libs/common/omnibus.py b/tasks/libs/common/omnibus.py index c8c59478738e1..d91b2649d27ab 100644 --- a/tasks/libs/common/omnibus.py +++ b/tasks/libs/common/omnibus.py @@ -95,7 +95,7 @@ def env_filter(item): "BUILD_HOOK", "BUNDLE_MIRROR__RUBYGEMS__ORG", "BUCKET_BRANCH", - "CHANGELOG_COMMIT_SHA_SSM_NAME", + "CHANGELOG_COMMIT_SHA", "CLANG_LLVM_VER", "CHANNEL", "CHART", @@ -121,7 +121,7 @@ def env_filter(item): "HOSTNAME", "HOST_IP", "INFOPATH", - "INSTALL_SCRIPT_API_KEY_SSM_NAME", + "INSTALL_SCRIPT_API_KEY", "INTEGRATION_WHEELS_CACHE_BUCKET", "IRBRC", "KITCHEN_INFRASTRUCTURE_FLAKES_RETRY", @@ -159,11 +159,11 @@ def env_filter(item): "USERDOMAIN", "USERNAME", "USERPROFILE", - "VCPKG_BLOB_SAS_URL_SSM_NAME", + "VCPKG_BLOB_SAS_URL", "VERSION", "VM_ASSETS", "WIN_S3_BUCKET", - "WINGET_PAT_SSM_NAME", + "WINGET_PAT", "WORKFLOW", "_", "build_before", @@ -318,7 +318,7 @@ def send_build_metrics(ctx, overall_duration): } ) dd_api_key = ctx.run( - f'{aws_cmd} ssm get-parameter --region us-east-1 --name {os.environ["API_KEY_ORG2_SSM_NAME"]} --with-decryption --query "Parameter.Value" --out text', + f'{aws_cmd} ssm get-parameter --region us-east-1 --name {os.environ["API_KEY_ORG2"]} --with-decryption --query "Parameter.Value" --out text', hide=True, ).stdout.strip() headers = {'Accept': 'application/json', 'Content-Type': 'application/json', 'DD-API-KEY': dd_api_key} @@ -336,7 +336,7 @@ def send_cache_miss_event(ctx, pipeline_id, job_name, job_id): else: aws_cmd = "aws" dd_api_key = ctx.run( - f'{aws_cmd} ssm get-parameter --region us-east-1 --name {os.environ["API_KEY_ORG2_SSM_NAME"]} --with-decryption --query "Parameter.Value" --out text', + f'{aws_cmd} ssm get-parameter --region us-east-1 --name {os.environ["API_KEY_ORG2"]} --with-decryption --query "Parameter.Value" --out text', hide=True, ).stdout.strip() headers = {'Accept': 'application/json', 'Content-Type': 'application/json', 'DD-API-KEY': dd_api_key} diff --git a/tasks/libs/common/utils.py b/tasks/libs/common/utils.py index f2de7730ed88d..9b8138176a624 100644 --- a/tasks/libs/common/utils.py +++ b/tasks/libs/common/utils.py @@ -8,6 +8,7 @@ import platform import re import sys +import tempfile import time import traceback from collections import Counter @@ -18,15 +19,12 @@ from subprocess import CalledProcessError, check_output from types import SimpleNamespace +import requests from 
invoke.context import Context from invoke.exceptions import Exit from tasks.libs.common.color import Color, color_message -from tasks.libs.common.constants import ( - ALLOWED_REPO_ALL_BRANCHES, - DEFAULT_BRANCH, - REPO_PATH, -) +from tasks.libs.common.constants import ALLOWED_REPO_ALL_BRANCHES, DEFAULT_BRANCH, REPO_PATH from tasks.libs.common.git import get_commit_sha from tasks.libs.owners.parsing import search_owners from tasks.libs.releasing.version import get_version @@ -334,6 +332,10 @@ def get_build_flags( env["GOARCH"] = arch.go_arch env["CGO_ENABLED"] = "1" # If we're cross-compiling, CGO is disabled by default. Ensure it's always enabled env["CC"] = arch.gcc_compiler() + if os.getenv('DD_CC'): + env['CC'] = os.getenv('DD_CC') + if os.getenv('DD_CXX'): + env['CXX'] = os.getenv('DD_CXX') if extldflags: ldflags += f"'-extldflags={extldflags}' " @@ -699,3 +701,51 @@ def team_to_label(team): 'asm-go': "agent-security", } return dico.get(team, team) + + +@contextmanager +def download_to_tempfile(url, checksum=None): + """ + Download a file from @url to a temporary file and yields the path. + + The temporary file is removed when the context manager exits. + + if @checksum is provided it will be updated with each chunk of the file + """ + fd, tmp_path = tempfile.mkstemp() + try: + with requests.get(url, stream=True) as r: + r.raise_for_status() + with os.fdopen(fd, "wb") as f: + # fd will be closed by context manager, so we no longer need it + fd = None + for chunk in r.iter_content(chunk_size=8192): + if checksum: + checksum.update(chunk) + f.write(chunk) + yield tmp_path + finally: + if fd is not None: + os.close(fd) + if os.path.exists(tmp_path): + os.remove(tmp_path) + + +def experimental(message): + """ + Marks this task as experimental and prints the message. + + Note: This decorator must be placed after the `task` decorator. 
+ """ + + def decorator(f): + @wraps(f) + def wrapper(*args, **kwargs): + fname = f.__name__ + print(color_message(f"Warning: {fname} is experimental: {message}", Color.ORANGE), file=sys.stderr) + + return f(*args, **kwargs) + + return wrapper + + return decorator diff --git a/tasks/libs/notify/jira_failing_tests.py b/tasks/libs/notify/jira_failing_tests.py new file mode 100644 index 0000000000000..b061f49dd4514 --- /dev/null +++ b/tasks/libs/notify/jira_failing_tests.py @@ -0,0 +1,79 @@ +import os + +try: + from atlassian import Jira + from datadog_api_client import ApiClient, Configuration + from datadog_api_client.v2.api.ci_visibility_tests_api import CIVisibilityTestsApi + from datadog_api_client.v2.model.ci_app_aggregate_sort import CIAppAggregateSort + from datadog_api_client.v2.model.ci_app_aggregation_function import CIAppAggregationFunction + from datadog_api_client.v2.model.ci_app_compute import CIAppCompute + from datadog_api_client.v2.model.ci_app_compute_type import CIAppComputeType + from datadog_api_client.v2.model.ci_app_query_options import CIAppQueryOptions + from datadog_api_client.v2.model.ci_app_sort_order import CIAppSortOrder + from datadog_api_client.v2.model.ci_app_tests_aggregate_request import CIAppTestsAggregateRequest + from datadog_api_client.v2.model.ci_app_tests_group_by import CIAppTestsGroupBy + from datadog_api_client.v2.model.ci_app_tests_query_filter import CIAppTestsQueryFilter +except ImportError: + pass + + +def get_jira(): + username = os.environ['ATLASSIAN_USERNAME'] + password = os.environ['ATLASSIAN_PASSWORD'] + jira = Jira(url="https://datadoghq.atlassian.net", username=username, password=password) + + return jira + + +def close_issue(jira, issue_key: str, verbose_test: str, dry_run: bool = False): + print('Closing the issue', issue_key, 'for test', verbose_test) + + if dry_run: + return + + jira.issue_add_comment(issue_key, 'Closing this issue since the test is not failing anymore') + jira.issue_transition(issue_key, 'Done') + + +def get_failing_tests_names() -> set[str]: + """ + Returns the names of the failing tests for the last 28 days + """ + + print('Getting failing tests for the last 28 days') + body = CIAppTestsAggregateRequest( + compute=[ + CIAppCompute( + aggregation=CIAppAggregationFunction.COUNT, + metric="@test.full_name", + type=CIAppComputeType.TOTAL, + ), + ], + filter=CIAppTestsQueryFilter( + _from="now-28d", + query="@test.service:datadog-agent @git.branch:main @test.status:fail", + to="now", + ), + group_by=[ + CIAppTestsGroupBy( + facet="@test.full_name", + limit=10000, + sort=CIAppAggregateSort( + order=CIAppSortOrder.DESCENDING, + ), + total=False, + ), + ], + options=CIAppQueryOptions( + timezone="GMT", + ), + ) + + configuration = Configuration() + with ApiClient(configuration) as api_client: + api_instance = CIVisibilityTestsApi(api_client) + response = api_instance.aggregate_ci_app_test_events(body=body) + result = response['data']['buckets'] + tests = {row['by']['@test.full_name'].removeprefix('github.com/DataDog/datadog-agent/') for row in result} + + return tests diff --git a/tasks/libs/pipeline/github_jira_map.yaml b/tasks/libs/pipeline/github_jira_map.yaml index e4222fdd2f056..0564ae0206376 100644 --- a/tasks/libs/pipeline/github_jira_map.yaml +++ b/tasks/libs/pipeline/github_jira_map.yaml @@ -33,6 +33,7 @@ '@datadog/database-monitoring': DBMON '@datadog/agent-cspm': SEC '@datadog/telemetry-and-analytics': AIT +'@datadog/apm-trace-storage': APMTS '@datadog/asm-go': APPSEC '@datadog/agent-delivery': BARX 
'@datadog/agent-devx-loops': ADXT diff --git a/tasks/libs/pipeline/github_slack_map.yaml b/tasks/libs/pipeline/github_slack_map.yaml index 42f19b76c9bb4..9897fa9c53ccc 100644 --- a/tasks/libs/pipeline/github_slack_map.yaml +++ b/tasks/libs/pipeline/github_slack_map.yaml @@ -36,8 +36,10 @@ '@datadog/database-monitoring': '#database-monitoring' '@datadog/agent-cspm': '#k9-cspm-ops' '@datadog/telemetry-and-analytics': '#instrumentation-telemetry' -'@datadog/asm-go': '#k9-asm-library-go' +'@datadog/apm-trace-storage': '#apm-trace-storage' +'@datadog/asm-go': '#k9-library-go' '@datadog/agent-delivery': '#agent-delivery-ops' '@datadog/agent-devx-infra': '#agent-devx-ops' '@datadog/agent-devx-loops': '#agent-devx-ops' '@datadog/apm-onboarding': '#apm-onboarding' +'@datadog/apm-reliability-and-performance': '#apm-ecosystems-reliability-and-performance' diff --git a/tasks/libs/releasing/qa.py b/tasks/libs/releasing/qa.py new file mode 100644 index 0000000000000..9483b2f05c45e --- /dev/null +++ b/tasks/libs/releasing/qa.py @@ -0,0 +1,23 @@ +import os + +from tasks.libs.ciproviders.github_api import GithubAPI + + +def setup_ddqa(ctx): + """ + Setup the environment for ddqa + """ + config_file = ctx.run("ddqa config show", hide=True).stdout.strip() + with open(config_file, "w") as config, open("tools/agent_QA/ddqa_template_config.toml") as template: + config.write(template.read()) + ctx.run(f"ddqa config set repo.datadog-agent.path {os.getcwd()}", hide=True) + gh = GithubAPI() + ctx.run("ddqa config set github.user github-actions[bot]", hide=True) + ctx.run(f"ddqa config set github.token {gh._auth.token}", hide=True) + ctx.run(f"ddqa config set jira.email {os.getenv('ATLASSIAN_USERNAME')}", hide=True) + ctx.run(f"ddqa config set jira.token {os.getenv('ATLASSIAN_PASSWORD')}", hide=True) + ctx.run("ddqa --auto sync", hide=True) + + +def get_labels(version): + return f"-l {version} -l {version.qa_label()} -l ddqa" diff --git a/tasks/libs/releasing/version.py b/tasks/libs/releasing/version.py index 4709d3a97acb6..8c781fb2e134f 100644 --- a/tasks/libs/releasing/version.py +++ b/tasks/libs/releasing/version.py @@ -265,10 +265,12 @@ def get_version( agent_version_cache_file_exist = os.path.exists(AGENT_VERSION_CACHE_NAME) if not agent_version_cache_file_exist: if pipeline_id and pipeline_id.isdigit() and project_name == REPO_NAME: - ctx.run( + result = ctx.run( f"aws s3 cp s3://dd-ci-artefacts-build-stable/datadog-agent/{pipeline_id}/{AGENT_VERSION_CACHE_NAME} .", hide="stdout", ) + if "unable to locate credentials" in result.stderr.casefold(): + raise Exit("Permanent error: unable to locate credentials, retry the job", 42) agent_version_cache_file_exist = True if agent_version_cache_file_exist: @@ -327,10 +329,12 @@ def get_version_numeric_only(ctx, major_version='7'): if pipeline_id and pipeline_id.isdigit() and project_name == REPO_NAME: try: if not os.path.exists(AGENT_VERSION_CACHE_NAME): - ctx.run( + result = ctx.run( f"aws s3 cp s3://dd-ci-artefacts-build-stable/datadog-agent/{pipeline_id}/{AGENT_VERSION_CACHE_NAME} .", hide="stdout", ) + if "unable to locate credentials" in result.stderr.casefold(): + raise Exit("Permanent error: unable to locate credentials, retry the job", 42) with open(AGENT_VERSION_CACHE_NAME) as file: cache_data = json.load(file) diff --git a/tasks/libs/types/arch.py b/tasks/libs/types/arch.py index 7f5408bf0f9ef..241e9d65e087a 100644 --- a/tasks/libs/types/arch.py +++ b/tasks/libs/types/arch.py @@ -136,7 +136,7 @@ def local() -> Arch: kmt_arch="x86_64", windows_arch="x64", 
ci_arch="x64", - spellings={"amd64", "x86_64", "x64", "x86-64"}, + spellings={"amd64", "x86_64", "x64", "x86-64", "x86"}, ) ALL_ARCHS = [ARCH_AMD64, ARCH_ARM64] diff --git a/tasks/libs/types/copyright.py b/tasks/libs/types/copyright.py index a19773744f811..3d19e49ba4603 100755 --- a/tasks/libs/types/copyright.py +++ b/tasks/libs/types/copyright.py @@ -34,6 +34,7 @@ '/pkg/remoteconfig/state/products/apmsampling/.*_gen(_test){,1}.go', '/pkg/security/security_profile/dump/activity_dump_easyjson.go', '/pkg/security/probe/actions_easyjson.go', + '/pkg/security/probe/actions_linux_easyjson.go', '/pkg/security/probe/custom_events_easyjson.go', '/pkg/security/serializers/serializers_easyjson.go', '/pkg/security/serializers/serializers_linux_easyjson.go', @@ -66,6 +67,7 @@ '^// This file is licensed under the MIT License.', '^// Copyright \\(C\\) 2017 ScyllaDB', '^// Copyright \\(c\\) Tailscale Inc & AUTHORS', + '^// Code generated by github.com/tinylib/msgp DO NOT EDIT.', ] @@ -144,7 +146,7 @@ def _is_excluded_header(header, exclude=None): exclude = [] for matcher in exclude: - if re.search(matcher, header[0]): + if re.search(matcher, header[0]) or re.search(matcher, header[2]): return True return False diff --git a/tasks/libs/types/version.py b/tasks/libs/types/version.py index 24ae7eef887a0..3e85d0c3b7fc5 100644 --- a/tasks/libs/types/version.py +++ b/tasks/libs/types/version.py @@ -105,5 +105,17 @@ def next_version(self, bump_major=False, bump_minor=False, bump_patch=False, rc= return new_version + def previous_rc_version(self): + if self.patch is None or self.rc is None or self.rc == 0: + raise RuntimeError("Cannot determine the previous version of incomplete or non-rc version") + previous = self.clone() + if previous.rc == 1: + previous.devel = True + previous.rc -= 1 + return previous + + def qa_label(self): + return f"{self._safe_value('major')}.{self._safe_value('minor')}.{self._safe_value('patch')}-qa" + def tag_pattern(self): return f"{self._safe_value('major')}.{self._safe_value('minor')}.{self._safe_value('patch')}*" diff --git a/tasks/linter.py b/tasks/linter.py index 2136ae60f25d0..964a1c0b2834e 100644 --- a/tasks/linter.py +++ b/tasks/linter.py @@ -17,11 +17,11 @@ generate_gitlab_full_configuration, get_all_gitlab_ci_configurations, get_gitlab_ci_configuration, - get_gitlab_repo, get_preset_contexts, load_context, read_includes, retrieve_all_paths, + test_gitlab_configuration, ) from tasks.libs.common.check_tools_version import check_tools_version from tasks.libs.common.color import Color, color_message @@ -301,21 +301,33 @@ def command(module_results, module: GoModule, module_result): @task -def list_ssm_parameters(_): +def list_parameters(_, type): """ List all SSM parameters used in the datadog-agent repository. 
""" - - ssm_owner = re.compile(r"^[A-Z].*_SSM_(NAME|KEY): (?P[^ ]+) +# +(?P.+)$") - ssm_params = defaultdict(list) + if type == "ssm": + section_pattern = re.compile(r"aws ssm variables") + elif type == "vault": + section_pattern = re.compile(r"vault variables") + else: + raise Exit(f"{color_message('Error', Color.RED)}: pattern must be in [ssm, vault], not |{type}|") + in_param_section = False + param_owner = re.compile(r"^[^:]+: (?P[^ ]+) +# +(?P.+)$") + params = defaultdict(list) with open(".gitlab-ci.yml") as f: for line in f: - m = ssm_owner.match(line.strip()) - if m: - ssm_params[m.group("owner")].append(m.group("param")) - for owner in ssm_params.keys(): + section = section_pattern.search(line) + if section: + in_param_section = not in_param_section + if in_param_section: + if len(line.strip()) == 0: + break + m = param_owner.match(line.strip()) + if m: + params[m.group("owner")].append(m.group("param")) + for owner in params.keys(): print(f"Owner:{owner}") - for param in ssm_params[owner]: + for param in params[owner]: print(f" - {param}") @@ -349,11 +361,11 @@ def ssm_parameters(ctx, mode="all", folders=None): for filename in error_files: print(f" - {filename}") raise Exit(code=1) - print(f"[{color_message('OK', Color.GREEN)}] All files are correctly using wrapper for aws ssm parameters.") + print(f"[{color_message('OK', Color.GREEN)}] All files are correctly using wrapper for secret parameters.") class SSMParameterCall: - def __init__(self, file, line_nb, with_wrapper=False, with_env_var=False, standard=True): + def __init__(self, file, line_nb, with_wrapper=False, with_env_var=False): """ Initialize an SSMParameterCall instance. @@ -362,18 +374,16 @@ def __init__(self, file, line_nb, with_wrapper=False, with_env_var=False, standa line_nb (int): The line number in the file where the SSM parameter call is located. with_wrapper (bool, optional): If the call is using the wrapper. Defaults to False. with_env_var (bool, optional): If the call is using an environment variable defined in .gitlab-ci.yml. Defaults to False. - not_standard (bool, optional): If the call is standard (matching either "aws ssm get-parameter --name" or "aws_ssm_get_wrapper"). Defaults to True. """ self.file = file self.line_nb = line_nb self.with_wrapper = with_wrapper self.with_env_var = with_env_var - self.standard = standard def __str__(self): message = "" - if not self.with_wrapper or not self.standard: - message += "Please use the dedicated `aws_ssm_get_wrapper.(sh|ps1)`." + if not self.with_wrapper: + message += "Please use the dedicated `fetch_secret.(sh|ps1)`." if not self.with_env_var: message += " Save your parameter name as environment variable in .gitlab-ci.yml file." return f"{self.file}:{self.line_nb + 1}. {message}" @@ -383,29 +393,24 @@ def __repr__(self): def list_get_parameter_calls(file): - ssm_get = re.compile(r"^.+ssm.get.+$") aws_ssm_call = re.compile(r"^.+ssm get-parameter.+--name +(?P[^ ]+).*$") - # remove the 'a' of 'aws' because '\a' is badly interpreted for windows paths - ssm_wrapper_call = re.compile(r"^.+ws_ssm_get_wrapper.(sh|ps1)[\"]? +(?P[^ )]+).*$") + # remove the first letter of the script name because '\f' is badly interpreted for windows paths + wrapper_call = re.compile(r"^.+etch_secret.(sh|ps1)[\"]? 
(-parameterName )?+(?P[^ )]+).*$") calls = [] with open(file) as f: try: for nb, line in enumerate(f): - is_ssm_get = ssm_get.match(line.strip()) - if is_ssm_get: - m = aws_ssm_call.match(line.strip()) - if m: - # Remove possible quotes - param = m["param"].replace('"', '').replace("'", "") - calls.append( - SSMParameterCall(file, nb, with_env_var=(param.startswith("$") or "os.environ" in param)) - ) - m = ssm_wrapper_call.match(line.strip()) - param = m["param"].replace('"', '').replace("'", "") if m else None - if m and not (param.startswith("$") or "os.environ" in param): - calls.append(SSMParameterCall(file, nb, with_wrapper=True)) - if not m: - calls.append(SSMParameterCall(file, nb, standard=False)) + m = aws_ssm_call.match(line.strip()) + if m: + # Remove possible quotes + param = m["param"].replace('"', '').replace("'", "") + calls.append( + SSMParameterCall(file, nb, with_env_var=(param.startswith("$") or "os.environ" in param)) + ) + m = wrapper_call.match(line.strip()) + param = m["param"].replace('"', '').replace("'", "") if m else None + if m and not (param.startswith("$") or "os.environ" in param): + calls.append(SSMParameterCall(file, nb, with_wrapper=True)) except UnicodeDecodeError: pass return calls @@ -419,33 +424,8 @@ def gitlab_ci(ctx, test="all", custom_context=None): This will lint the main gitlab ci file with different variable contexts and lint other triggered gitlab ci configs. """ - - agent = get_gitlab_repo() - has_errors = False - print(f'{color_message("info", Color.BLUE)}: Fetching Gitlab CI configurations...') - configs = get_all_gitlab_ci_configurations(ctx) - - def test_gitlab_configuration(entry_point, input_config, context=None): - nonlocal has_errors - - # Update config and lint it - config = generate_gitlab_full_configuration(ctx, entry_point, context=context, input_config=input_config) - res = agent.ci_lint.create({"content": config, "dry_run": True, "include_jobs": True}) - status = color_message("valid", "green") if res.valid else color_message("invalid", "red") - - print(f"{color_message(entry_point, Color.BOLD)} config is {status}") - if len(res.warnings) > 0: - print( - f'{color_message("warning", Color.ORANGE)}: {color_message(entry_point, Color.BOLD)}: {res.warnings})', - file=sys.stderr, - ) - if not res.valid: - print( - f'{color_message("error", Color.RED)}: {color_message(entry_point, Color.BOLD)}: {res.errors})', - file=sys.stderr, - ) - has_errors = True + configs = get_all_gitlab_ci_configurations(ctx, with_lint=False) for entry_point, input_config in configs.items(): with gitlab_section(f"Testing {entry_point}", echo=True): @@ -460,12 +440,9 @@ def test_gitlab_configuration(entry_point, input_config, context=None): print(f'{color_message("info", Color.BLUE)}: We will test {len(all_contexts)} contexts') for context in all_contexts: print("Test gitlab configuration with context: ", context) - test_gitlab_configuration(entry_point, input_config, dict(context)) + test_gitlab_configuration(ctx, entry_point, input_config, dict(context)) else: - test_gitlab_configuration(entry_point, input_config) - - if has_errors: - raise Exit(code=1) + test_gitlab_configuration(ctx, entry_point, input_config) @task @@ -620,7 +597,9 @@ def contains_valid_change_rule(rule): tests_without_change_path = defaultdict(list) tests_without_change_path_allowed = defaultdict(list) for test, filepath in tests: - if not any(contains_valid_change_rule(rule) for rule in config[test]['rules'] if isinstance(rule, dict)): + if "rules" in config[test] and not any( + 
contains_valid_change_rule(rule) for rule in config[test]['rules'] if isinstance(rule, dict) + ): if test in tests_without_change_path_allow_list: tests_without_change_path_allowed[filepath].append(test) else: diff --git a/tasks/modules.py b/tasks/modules.py index ebed764f57eaf..219e1b34c3d74 100644 --- a/tasks/modules.py +++ b/tasks/modules.py @@ -134,6 +134,7 @@ def dependency_path(self, agent_version): ), "cmd/agent/common/path": GoModule("cmd/agent/common/path", independent=True, used_by_otel=True), "comp/api/api/def": GoModule("comp/api/api/def", independent=True, used_by_otel=True), + "comp/api/authtoken": GoModule("comp/api/authtoken", independent=True, used_by_otel=True), "comp/core/config": GoModule("comp/core/config", independent=True, used_by_otel=True), "comp/core/flare/builder": GoModule("comp/core/flare/builder", independent=True, used_by_otel=True), "comp/core/flare/types": GoModule("comp/core/flare/types", independent=True, used_by_otel=True), @@ -163,8 +164,6 @@ def dependency_path(self, agent_version): "comp/otelcol/collector-contrib/impl": GoModule( "comp/otelcol/collector-contrib/impl", independent=True, used_by_otel=True ), - "comp/otelcol/configstore/def": GoModule("comp/otelcol/configstore/def", independent=True, used_by_otel=True), - "comp/otelcol/configstore/impl": GoModule("comp/otelcol/configstore/impl", independent=True, used_by_otel=True), "comp/otelcol/converter/def": GoModule("comp/otelcol/converter/def", independent=True, used_by_otel=True), "comp/otelcol/converter/impl": GoModule("comp/otelcol/converter/impl", independent=True, used_by_otel=True), "comp/otelcol/ddflareextension/def": GoModule( @@ -189,6 +188,9 @@ def dependency_path(self, agent_version): "comp/otelcol/otlp/components/metricsclient": GoModule( "comp/otelcol/otlp/components/metricsclient", independent=True, used_by_otel=True ), + "comp/otelcol/otlp/components/processor/infraattributesprocessor": GoModule( + "comp/otelcol/otlp/components/processor/infraattributesprocessor", independent=True, used_by_otel=True + ), "comp/otelcol/otlp/components/statsprocessor": GoModule( "comp/otelcol/otlp/components/statsprocessor", independent=True, used_by_otel=True ), @@ -210,13 +212,16 @@ def dependency_path(self, agent_version): "internal/tools/modparser": GoModule("internal/tools/modparser", condition=lambda: False, should_tag=False), "internal/tools/proto": GoModule("internal/tools/proto", condition=lambda: False, should_tag=False), "pkg/aggregator/ckey": GoModule("pkg/aggregator/ckey", independent=True, used_by_otel=True), - "pkg/api": GoModule("pkg/api", independent=True), + "pkg/api": GoModule("pkg/api", independent=True, used_by_otel=True), "pkg/collector/check/defaults": GoModule("pkg/collector/check/defaults", independent=True, used_by_otel=True), "pkg/config/env": GoModule("pkg/config/env", independent=True, used_by_otel=True), "pkg/config/mock": GoModule("pkg/config/mock", independent=True, used_by_otel=True), + "pkg/config/nodetreemodel": GoModule("pkg/config/nodetreemodel", independent=True, used_by_otel=True), "pkg/config/model": GoModule("pkg/config/model", independent=True, used_by_otel=True), "pkg/config/remote": GoModule("pkg/config/remote", independent=True), "pkg/config/setup": GoModule("pkg/config/setup", independent=True, used_by_otel=True), + "pkg/config/teeconfig": GoModule("pkg/config/teeconfig", independent=True, used_by_otel=True), + "pkg/config/structure": GoModule("pkg/config/structure", independent=True, used_by_otel=True), "pkg/config/utils": GoModule("pkg/config/utils", 
independent=True, used_by_otel=True), "pkg/errors": GoModule("pkg/errors", independent=True), "pkg/gohai": GoModule("pkg/gohai", independent=True, importable=False), @@ -281,9 +286,6 @@ def dependency_path(self, agent_version): "pkg/util/uuid": GoModule("pkg/util/uuid", independent=True), "pkg/util/winutil": GoModule("pkg/util/winutil", independent=True, used_by_otel=True), "pkg/version": GoModule("pkg/version", independent=True, used_by_otel=True), - "test/e2e/containers/otlp_sender": GoModule( - "test/e2e/containers/otlp_sender", condition=lambda: False, should_tag=False - ), "test/fakeintake": GoModule("test/fakeintake", independent=True), "test/new-e2e": GoModule( "test/new-e2e", @@ -503,3 +505,14 @@ def validate_used_by_otel(ctx: Context): message += "Please label them as \"used_by_otel\" in the DEFAULT_MODULES list." raise Exit(message) + + +def get_module_by_path(path: Path) -> GoModule | None: + """ + Return the GoModule object corresponding to the given path. + """ + for module in DEFAULT_MODULES.values(): + if Path(module.path) == path: + return module + + return None diff --git a/tasks/msi.py b/tasks/msi.py index 5574f67debb98..714646bec9540 100644 --- a/tasks/msi.py +++ b/tasks/msi.py @@ -2,6 +2,7 @@ msi namespaced tasks """ +import hashlib import mmap import os import shutil @@ -11,7 +12,7 @@ from invoke import task from invoke.exceptions import Exit, UnexpectedExit -from tasks.libs.common.utils import timed +from tasks.libs.common.utils import download_to_tempfile, timed from tasks.libs.releasing.version import get_version, load_release_versions # Windows only import @@ -29,6 +30,8 @@ BUILD_ROOT_DIR = os.path.join('C:\\', "dev", "msi", "DatadogAgentInstaller") BUILD_SOURCE_DIR = os.path.join(BUILD_ROOT_DIR, "src") BUILD_OUTPUT_DIR = os.path.join(BUILD_ROOT_DIR, "output") +# Match to AgentInstaller.cs BinSource +AGENT_BIN_SOURCE_DIR = os.path.join('C:\\', 'opt', 'datadog-agent', 'bin', 'agent') NUGET_PACKAGES_DIR = os.path.join(BUILD_ROOT_DIR, 'packages') NUGET_CONFIG_FILE = os.path.join(BUILD_ROOT_DIR, 'NuGet.config') @@ -178,7 +181,7 @@ def _build( # back to the mount. 
try: ctx.run( - f'robocopy {SOURCE_ROOT_DIR} {BUILD_SOURCE_DIR} /MIR /XF cabcache packages embedded2.COMPRESSED embedded3.COMPRESSED', + f'robocopy {SOURCE_ROOT_DIR} {BUILD_SOURCE_DIR} /MIR /XF *.COMPRESSED *.g.wxs *.msi *.exe /XD bin obj .vs cab cabcache packages', hide=True, ) except UnexpectedExit as e: @@ -433,3 +436,85 @@ def MsiClosing(obj): yield obj finally: obj.Close() + + +def get_msm_info(ctx, release_version): + """ + Get the merge module info from the release.json for the given release_version + """ + env = load_release_versions(ctx, release_version) + base_url = "https://s3.amazonaws.com/dd-windowsfilter/builds" + msm_info = {} + if 'WINDOWS_DDNPM_VERSION' in env: + info = { + 'filename': 'DDNPM.msm', + 'build': env['WINDOWS_DDNPM_DRIVER'], + 'version': env['WINDOWS_DDNPM_VERSION'], + 'shasum': env['WINDOWS_DDNPM_SHASUM'], + } + info['url'] = f"{base_url}/{info['build']}/ddnpminstall-{info['version']}.msm" + msm_info['DDNPM'] = info + if 'WINDOWS_DDPROCMON_VERSION' in env: + info = { + 'filename': 'DDPROCMON.msm', + 'build': env['WINDOWS_DDPROCMON_DRIVER'], + 'version': env['WINDOWS_DDPROCMON_VERSION'], + 'shasum': env['WINDOWS_DDPROCMON_SHASUM'], + } + info['url'] = f"{base_url}/{info['build']}/ddprocmoninstall-{info['version']}.msm" + msm_info['DDPROCMON'] = info + if 'WINDOWS_APMINJECT_VERSION' in env: + info = { + 'filename': 'ddapminstall.msm', + 'build': env['WINDOWS_APMINJECT_MODULE'], + 'version': env['WINDOWS_APMINJECT_VERSION'], + 'shasum': env['WINDOWS_APMINJECT_SHASUM'], + } + info['url'] = f"{base_url}/{info['build']}/ddapminstall-{info['version']}.msm" + msm_info['APMINJECT'] = info + return msm_info + + +@task( + iterable=['drivers'], + help={ + 'drivers': 'List of drivers to fetch (default: DDNPM, DDPROCMON, APMINJECT)', + 'release_version': 'Release version to fetch drivers from (default: nightly-a7)', + }, +) +def fetch_driver_msm(ctx, drivers=None, release_version=None): + """ + Fetch the driver merge modules (.msm) that are consumed by the Agent MSI. 
+ + Defaults to the versions provided in the @release_version section of release.json + """ + ALLOWED_DRIVERS = ['DDNPM', 'DDPROCMON', 'APMINJECT'] + if not release_version: + release_version = 'nightly-a7' + + msm_info = get_msm_info(ctx, release_version) + if not drivers: + # if user did not specify drivers, use the ones in the release.json + drivers = msm_info.keys() + + for driver in drivers: + driver = driver.upper() + if driver not in ALLOWED_DRIVERS: + raise Exit(f"Invalid driver: {driver}, choose from {ALLOWED_DRIVERS}") + + info = msm_info[driver] + url = info['url'] + shasum = info['shasum'] + path = os.path.join(AGENT_BIN_SOURCE_DIR, info['filename']) + + # download from url with requests package + checksum = hashlib.sha256() + with download_to_tempfile(url, checksum) as tmp_path: + # check sha256 + if checksum.hexdigest().lower() != shasum.lower(): + raise Exit(f"Checksum mismatch for {url}") + # move to final path + shutil.move(tmp_path, path) + + print(f"Updated {driver}") + print(f"\t-> Downloaded {url} to {path}") diff --git a/tasks/notify.py b/tasks/notify.py index d87759c1c5fd1..7cdd97f12b170 100644 --- a/tasks/notify.py +++ b/tasks/notify.py @@ -1,23 +1,25 @@ from __future__ import annotations import os +import re import sys from datetime import timedelta +import yaml from invoke import Context, task from invoke.exceptions import Exit import tasks.libs.notify.unit_tests as unit_tests_utils from tasks.github_tasks import pr_commenter +from tasks.gitlab_helpers import compute_gitlab_ci_config_diff from tasks.libs.ciproviders.gitlab_api import ( MultiGitlabCIDiff, - get_all_gitlab_ci_configurations, ) from tasks.libs.common.color import Color, color_message -from tasks.libs.common.constants import DEFAULT_BRANCH from tasks.libs.common.datadog_api import send_metrics from tasks.libs.common.utils import gitlab_section from tasks.libs.notify import alerts, failure_summary, pipeline_status +from tasks.libs.notify.jira_failing_tests import close_issue, get_failing_tests_names, get_jira from tasks.libs.notify.utils import PROJECT_NAME from tasks.libs.pipeline.notifications import ( check_for_missing_owners_slack_and_jira, @@ -136,7 +138,9 @@ def unit_tests(ctx, pipeline_id, pipeline_url, branch_name, dry_run=False): @task -def gitlab_ci_diff(ctx, before: str | None = None, after: str | None = None, pr_comment: bool = False): +def gitlab_ci_diff( + ctx, before: str | None = None, after: str | None = None, pr_comment: bool = False, from_diff: str | None = None +): """ Creates a diff from two gitlab-ci configurations. 
@@ -166,20 +170,12 @@ def gitlab_ci_diff(ctx, before: str | None = None, after: str | None = None, pr_ job_url = os.environ['CI_JOB_URL'] try: - before_name = before or "merge base" - after_name = after or "local files" - - # The before commit is the LCA commit between before and after - before = before or DEFAULT_BRANCH - before = ctx.run(f'git merge-base {before} {after or "HEAD"}', hide=True).stdout.strip() - - print(f'Getting after changes config ({color_message(after_name, Color.BOLD)})') - after_config = get_all_gitlab_ci_configurations(ctx, git_ref=after, clean_configs=True) - - print(f'Getting before changes config ({color_message(before_name, Color.BOLD)})') - before_config = get_all_gitlab_ci_configurations(ctx, git_ref=before, clean_configs=True) - - diff = MultiGitlabCIDiff(before_config, after_config) + if from_diff: + with open(from_diff) as f: + diff_data = yaml.safe_load(f) + diff = MultiGitlabCIDiff.from_dict(diff_data) + else: + _, _, diff = compute_gitlab_ci_config_diff(ctx, before, after) if not diff: print(color_message("No changes in the gitlab-ci configuration", Color.GREEN)) @@ -227,3 +223,51 @@ def gitlab_ci_diff(ctx, before: str | None = None, after: str | None = None, pr_ ) raise + + +@task +def close_failing_tests_stale_issues(_, dry_run=False): + """ + Will mark as done all issues created by the [failed parent tests workflow](https://app.datadoghq.com/workflow/62670e82-8416-459b-bf74-9367b8a69277) that are stale. + Stale is an issue: + - In the "To Do" section of a project + - Where the test has not failed since 28 days + - That has no comment other than the bot's comments + + This task is executed periodically. + """ + + re_test_name = re.compile('Test name: (.*)\n') + + still_failing = get_failing_tests_names() + jira = get_jira() + + print('Getting potential issues to close') + issues = jira.jql('status = "To Do" AND summary ~ "Failed agent CI test"')['issues'] + + print(f'{len(issues)} failing test cards found') + + n_closed = 0 + for issue in issues: + # No comment other than the bot's comments + comments = issue['fields']['comment']['comments'] + has_no_comments = True + test_name = None + for comment in comments: + # This is not a bot message + if 'robot' not in comment['author']['displayName'].casefold(): + has_no_comments = False + break + + test_name_match = re_test_name.findall(comment['body']) + if test_name_match: + test_name = test_name_match[0] + + if has_no_comments and test_name and test_name not in still_failing: + try: + close_issue(jira, issue['key'], test_name, dry_run) + n_closed += 1 + except Exception as e: + print(f'Error closing issue {issue["key"]}: {e}', file=sys.stderr) + + print(f'Closed {n_closed} issues without failing tests') diff --git a/tasks/omnibus.py b/tasks/omnibus.py index 9caaf29941353..b6f1e03d1d7c2 100644 --- a/tasks/omnibus.py +++ b/tasks/omnibus.py @@ -90,6 +90,7 @@ def get_omnibus_env( go_mod_cache=None, flavor=AgentFlavor.base, pip_config_file="pip.conf", + custom_config_dir=None, ): env = load_release_versions(ctx, release_version) @@ -133,20 +134,26 @@ def get_omnibus_env( env['SYSTEM_PROBE_BIN'] = system_probe_bin env['AGENT_FLAVOR'] = flavor.name + if custom_config_dir: + env["OUTPUT_CONFIG_DIR"] = custom_config_dir + # We need to override the workers variable in omnibus build when running on Kubernetes runners, # otherwise, ohai detect the number of CPU on the host and run the make jobs with all the CPU. 
kubernetes_cpu_request = os.environ.get('KUBERNETES_CPU_REQUEST') if kubernetes_cpu_request: env['OMNIBUS_WORKERS_OVERRIDE'] = str(int(kubernetes_cpu_request) + 1) - # Forward the DEPLOY_AGENT variable so that we can use a higher compression level for deployed artifacts - deploy_agent = os.environ.get('DEPLOY_AGENT') - if deploy_agent: - env['DEPLOY_AGENT'] = deploy_agent - if 'PACKAGE_ARCH' in os.environ: - env['PACKAGE_ARCH'] = os.environ['PACKAGE_ARCH'] - if 'INSTALL_DIR' in os.environ: - print('Forwarding INSTALL_DIR') - env['INSTALL_DIR'] = os.environ['INSTALL_DIR'] + env_to_forward = [ + # Forward the DEPLOY_AGENT variable so that we can use a higher compression level for deployed artifacts + 'DEPLOY_AGENT', + 'PACKAGE_ARCH', + 'INSTALL_DIR', + 'DD_CC', + 'DD_CXX', + 'DD_CMAKE_TOOLCHAIN', + ] + for key in env_to_forward: + if key in os.environ: + env[key] = os.environ[key] return env @@ -177,6 +184,7 @@ def build( pip_config_file="pip.conf", host_distribution=None, install_directory=None, + config_directory=None, target_project=None, ): """ @@ -208,6 +216,7 @@ def build( go_mod_cache=go_mod_cache, flavor=flavor, pip_config_file=pip_config_file, + custom_config_dir=config_directory, ) if not target_project: @@ -246,7 +255,7 @@ def build( # For instance if git_cache_dir is set to "/git/cache/dir" and install_dir is # set to /a/b/c, the cache git repository will be located in # /git/cache/dir/a/b/c/.git - if install_directory is None: + if not install_directory: install_directory = install_dir_for_project(target_project) # Is the path starts with a /, it's considered the new root for the joined path # which effectively drops whatever was in omnibus_cache_dir @@ -379,3 +388,41 @@ def manifest( omnibus_s3_cache=False, log_level=log_level, ) + + +@task +def rpath_edit(ctx, install_path, target_rpath_dd_folder, platform="linux"): + # Collect mime types for all files inside the Agent installation + files = ctx.run(rf"find {install_path} -type f -exec file --mime-type \{{\}} \+", hide=True).stdout + for line in files.splitlines(): + if not line: + continue + file, file_type = line.split(":") + file_type = file_type.strip() + + if platform == "linux": + if file_type not in ["application/x-executable", "inode/symlink", "application/x-sharedlib"]: + continue + binary_rpath = ctx.run(f'objdump -x {file} | grep "RPATH"', warn=True, hide=True).stdout + else: + if file_type != "application/x-mach-binary": + continue + binary_rpath = ctx.run(f'otool -l {file} | grep -A 2 "RPATH"', warn=True, hide=True).stdout + + if install_path in binary_rpath: + new_rpath = os.path.relpath(target_rpath_dd_folder, os.path.dirname(file)) + if platform == "linux": + ctx.run(f"patchelf --force-rpath --set-rpath \\$ORIGIN/{new_rpath}/embedded/lib {file}") + else: + # The macOS agent binary has 18 RPATH definition, replacing the first one should be enough + # but just in case we're replacing them all. + # We're also avoiding unnecessary `install_name_tool` call as much as possible. 
+ number_of_rpaths = binary_rpath.count('\n') // 3 + for _ in range(number_of_rpaths): + exit_code = ctx.run( + f"install_name_tool -rpath {install_path}/embedded/lib @loader_path/{new_rpath}/embedded/lib {file}", + warn=True, + hide=True, + ).exited + if exit_code != 0: + break diff --git a/tasks/otel_agent.py b/tasks/otel_agent.py index 7c6572ebdc8fc..7e1b0df39412b 100644 --- a/tasks/otel_agent.py +++ b/tasks/otel_agent.py @@ -4,7 +4,7 @@ from invoke import task from invoke.exceptions import Exit -from tasks.libs.common.utils import REPO_PATH, bin_name +from tasks.libs.common.utils import REPO_PATH, bin_name, get_version_ldflags BIN_NAME = "otel-agent" CFG_NAME = "otel-config.yaml" @@ -25,8 +25,9 @@ def build(ctx): env = {"GO111MODULE": "on"} build_tags = ['otlp'] + ldflags = get_version_ldflags(ctx, major_version='7') - cmd = f"go build -mod=mod -tags=\"{' '.join(build_tags)}\" -o {BIN_PATH} {REPO_PATH}/cmd/otel-agent" + cmd = f"go build -mod=mod -tags=\"{' '.join(build_tags)}\" -ldflags=\"{ldflags}\" -o {BIN_PATH} {REPO_PATH}/cmd/otel-agent" ctx.run(cmd, env=env) diff --git a/tasks/pipeline.py b/tasks/pipeline.py index 8072ba025d4c5..97fec5231f87f 100644 --- a/tasks/pipeline.py +++ b/tasks/pipeline.py @@ -529,7 +529,7 @@ def changelog(ctx, new_commit_sha): else: parent_dir = os.getcwd() old_commit_sha = ctx.run( - f"{parent_dir}/tools/ci/aws_ssm_get_wrapper.sh {os.environ['CHANGELOG_COMMIT_SHA_SSM_NAME']}", + f"{parent_dir}/tools/ci/fetch_secret.sh {os.environ['CHANGELOG_COMMIT_SHA']}", hide=True, ).stdout.strip() if not new_commit_sha: @@ -582,10 +582,10 @@ def changelog(ctx, new_commit_sha): if messages: slack_message += ( "\n".join(messages) + "\n:wave: Authors, please check the " - " for issues" + " for issues" ) else: slack_message += empty_changelog_msg @@ -593,11 +593,13 @@ def changelog(ctx, new_commit_sha): print(f"Posting message to slack: \n {slack_message}") send_slack_message("system-probe-ops", slack_message) print(f"Writing new commit sha: {new_commit_sha} to SSM") - ctx.run( + res = ctx.run( f"aws ssm put-parameter --name ci.datadog-agent.gitlab_changelog_commit_sha --value {new_commit_sha} " "--type \"SecureString\" --region us-east-1 --overwrite", hide=True, ) + if "unable to locate credentials" in res.stderr.casefold(): + raise Exit("Permanent error: unable to locate credentials, retry the job", code=42) @task @@ -1027,7 +1029,7 @@ def compare_to_itself(ctx): ctx.run("git config --global user.email 'github-app[bot]@users.noreply.github.com'", hide=True) # The branch must exist in gitlab to be able to "compare_to" # Push an empty commit to prevent linking this pipeline to the actual PR - ctx.run("git commit -m 'Compare to itself' --allow-empty", hide=True) + ctx.run("git commit -m 'Initial push of the compare/to branch' --allow-empty", hide=True) ctx.run(f"git push origin {new_branch}") from tasks.libs.releasing.json import load_release_json @@ -1040,7 +1042,7 @@ def compare_to_itself(ctx): with open(file, 'w') as f: f.write(content.replace(f'compare_to: {release_json["base_branch"]}', f'compare_to: {new_branch}')) - ctx.run("git commit -am 'Compare to itself'", hide=True) + ctx.run("git commit -am 'Commit to compare to itself'", hide=True) ctx.run(f"git push origin {new_branch}", hide=True) max_attempts = 6 compare_to_pipeline = None diff --git a/tasks/release.py b/tasks/release.py index ac168432829c9..52dbc4fde4be9 100644 --- a/tasks/release.py +++ b/tasks/release.py @@ -29,7 +29,7 @@ clone, get_current_branch, get_last_commit, - get_last_tag, + 
get_last_release_tag, try_git_command, ) from tasks.libs.common.user_interactions import yes_no_question @@ -598,6 +598,7 @@ def create_release_branches(ctx, base_directory="~/dd", major_versions="6,7", up current = current_version(ctx, max(list_major_versions)) next = current.next_version(bump_minor=True) current.rc = False + current.devel = False next.devel = False # Strings with proper branch/tag names @@ -1008,7 +1009,7 @@ def check_for_changes(ctx, release_branch, warning_mode=False): changes = 'false' for repo_name, repo in repo_data.items(): head_commit = get_last_commit(ctx, repo_name, repo['branch']) - last_tag_commit, last_tag_name = get_last_tag(ctx, repo_name, next_version.tag_pattern()) + last_tag_commit, last_tag_name = get_last_release_tag(ctx, repo_name, next_version.tag_pattern()) if last_tag_commit != "" and last_tag_commit != head_commit: changes = 'true' print(f"{repo_name} has new commits since {last_tag_name}", file=sys.stderr) @@ -1033,3 +1034,18 @@ def check_for_changes(ctx, release_branch, warning_mode=False): ) # Send a value for the create_rc_pr.yml workflow print(changes) + + +@task +def create_qa_cards(ctx, tag): + """ + Automate the call to ddqa + """ + from tasks.libs.releasing.qa import get_labels, setup_ddqa + + version = _create_version_from_match(RC_VERSION_RE.match(tag)) + if not version.rc: + print(f"{tag} is not a release candidate, skipping") + return + setup_ddqa(ctx) + ctx.run(f"ddqa --auto create {version.previous_rc_version()} {tag} {get_labels(version)}") diff --git a/tasks/rtloader.py b/tasks/rtloader.py index 3d264c7d01610..e42a21f9be620 100644 --- a/tasks/rtloader.py +++ b/tasks/rtloader.py @@ -64,6 +64,8 @@ def make(ctx, install_prefix=None, python_runtimes='3', cmake_options=''): cmake_options += " -G \"Unix Makefiles\"" cmake_args = cmake_options + f" -DBUILD_DEMO:BOOL=OFF -DCMAKE_INSTALL_PREFIX:PATH={install_prefix or dev_path}" + if os.getenv('DD_CMAKE_TOOLCHAIN'): + cmake_args += f' --toolchain {os.getenv("DD_CMAKE_TOOLCHAIN")}' python_runtimes = python_runtimes.split(',') diff --git a/tasks/security_agent.py b/tasks/security_agent.py index 9b38f03ad6f07..b3d7386a36ef8 100644 --- a/tasks/security_agent.py +++ b/tasks/security_agent.py @@ -74,7 +74,9 @@ def build( go_mod=go_mod, ) - ldflags, gcflags, env = get_build_flags(ctx, major_version=major_version, python_runtimes='3', static=static) + ldflags, gcflags, env = get_build_flags( + ctx, major_version=major_version, python_runtimes='3', static=static, install_path=install_path + ) main = "main." 
ld_vars = { diff --git a/tasks/setup.py b/tasks/setup.py index f1ffa8d689718..0426543a16010 100644 --- a/tasks/setup.py +++ b/tasks/setup.py @@ -43,7 +43,6 @@ def setup(ctx, vscode=False): check_python_version, check_go_version, update_python_dependencies, - download_go_tools, install_go_tools, install_protoc, enable_pre_commit, @@ -257,19 +256,3 @@ def install_protoc(ctx) -> SetupResult: status = Status.FAIL return SetupResult("Install protoc", status, message) - - -def download_go_tools(ctx) -> SetupResult: - print(color_message("Downloading go tools...", Color.BLUE)) - status = Status.OK - message = "" - - try: - from tasks import download_tools - - download_tools(ctx) - except Exception as e: - message = f'Download Go tools failed: {e}' - status = Status.FAIL - - return SetupResult("Download Go tools", status, message) diff --git a/tasks/system_probe.py b/tasks/system_probe.py index 4714474f5b564..e54131bbd2fe0 100644 --- a/tasks/system_probe.py +++ b/tasks/system_probe.py @@ -54,6 +54,7 @@ "./pkg/collector/corechecks/ebpf/...", "./pkg/collector/corechecks/servicediscovery/module/...", "./pkg/process/monitor/...", + "./pkg/dynamicinstrumentation/...", ] TEST_PACKAGES = " ".join(TEST_PACKAGES_LIST) # change `timeouts` in `test/new-e2e/system-probe/test-runner/main.go` if you change them here @@ -332,7 +333,7 @@ def ninja_test_ebpf_programs(nw: NinjaWriter, build_dir): ebpf_c_dir = os.path.join(ebpf_bpf_dir, "testdata", "c") test_flags = "-g -DDEBUG=1" - test_programs = ["logdebug-test", "error_telemetry"] + test_programs = ["logdebug-test", "error_telemetry", "uprobe_attacher-test"] for prog in test_programs: infile = os.path.join(ebpf_c_dir, f"{prog}.c") @@ -386,6 +387,7 @@ def ninja_runtime_compilation_files(nw: NinjaWriter, gobin): "pkg/network/tracer/connection/kprobe/compile.go": "tracer", "pkg/network/tracer/offsetguess_test.go": "offsetguess-test", "pkg/security/ebpf/compile.go": "runtime-security", + "pkg/dynamicinstrumentation/codegen/compile.go": "dynamicinstrumentation", } nw.rule( @@ -494,6 +496,7 @@ def ninja_cgo_type_files(nw: NinjaWriter): "pkg/ebpf/types.go": [ "pkg/ebpf/c/lock_contention.h", ], + "pkg/dynamicinstrumentation/ditypes/ebpf.go": ["pkg/dynamicinstrumentation/codegen/c/types.h"], } nw.rule( name="godefs", @@ -513,7 +516,7 @@ def ninja_cgo_type_files(nw: NinjaWriter): inputs=[f], outputs=[os.path.join(in_dir, out_file)], rule="godefs", - implicit=headers, + implicit=headers + [script_path], variables={ "in_dir": in_dir, "in_file": in_file, @@ -842,7 +845,6 @@ def go_package_dirs(packages, build_tags): This handles the ellipsis notation (eg. ./pkg/ebpf/...) 
""" - target_packages = [] format_arg = '{{ .Dir }}' buildtags_arg = ",".join(build_tags) packages_arg = " ".join(packages) diff --git a/tasks/testwasher.py b/tasks/testwasher.py index 18536aef17e58..b3ceb6d6c8e09 100644 --- a/tasks/testwasher.py +++ b/tasks/testwasher.py @@ -8,7 +8,7 @@ from invoke import task from tasks.libs.ciproviders.gitlab_api import ( - get_full_gitlab_ci_configuration, + resolve_gitlab_ci_configuration, ) from tasks.libs.common.utils import gitlab_section from tasks.test_core import ModuleTestResult @@ -180,7 +180,7 @@ def generate_flake_finder_pipeline(ctx, n=3): """ # Read gitlab config - config = get_full_gitlab_ci_configuration(ctx, ".gitlab-ci.yml") + config = resolve_gitlab_ci_configuration(ctx, ".gitlab-ci.yml") # Lets keep only variables and jobs with flake finder variable kept_job = {} diff --git a/tasks/unit_tests/gitlab_api_tests.py b/tasks/unit_tests/gitlab_api_tests.py index 28f35457379dd..5f2add57d44f8 100644 --- a/tasks/unit_tests/gitlab_api_tests.py +++ b/tasks/unit_tests/gitlab_api_tests.py @@ -6,6 +6,7 @@ from tasks.libs.ciproviders.gitlab_api import ( GitlabCIDiff, + MultiGitlabCIDiff, clean_gitlab_ci_configuration, expand_matrix_jobs, filter_gitlab_ci_configuration, @@ -170,13 +171,56 @@ def test_make_diff(self): 'script': 'echo "???"', }, } - diff = GitlabCIDiff(before, after) + diff = GitlabCIDiff.from_contents(before, after) self.assertSetEqual(diff.modified, {'job1'}) self.assertSetEqual(set(diff.modified_diffs.keys()), {'job1'}) self.assertSetEqual(diff.removed, {'job4'}) self.assertSetEqual(diff.added, {'job5'}) self.assertSetEqual(diff.renamed, {('job2', 'job2_renamed')}) + def test_serialization(self): + before = { + 'job1': { + 'script': [ + 'echo "hello"', + 'echo "hello?"', + 'echo "hello!"', + ] + }, + 'job2': { + 'script': 'echo "world"', + }, + 'job3': { + 'script': 'echo "!"', + }, + 'job4': { + 'script': 'echo "?"', + }, + } + after = { + 'job1': { + 'script': [ + 'echo "hello"', + 'echo "bonjour?"', + 'echo "hello!"', + ] + }, + 'job2_renamed': { + 'script': 'echo "world"', + }, + 'job3': { + 'script': 'echo "!"', + }, + 'job5': { + 'script': 'echo "???"', + }, + } + diff = MultiGitlabCIDiff.from_contents({'file': before}, {'file': after}) + dict_diff = diff.to_dict() + diff_from_dict = MultiGitlabCIDiff.from_dict(dict_diff) + + self.assertDictEqual(diff_from_dict.before, diff.before) + class TestRetrieveAllPaths(unittest.TestCase): def test_all_configs(self): diff --git a/tasks/unit_tests/junit_tests.py b/tasks/unit_tests/junit_tests.py index 00a95dc49e48d..37876198a4668 100644 --- a/tasks/unit_tests/junit_tests.py +++ b/tasks/unit_tests/junit_tests.py @@ -55,7 +55,7 @@ def test_without_split(self): def test_with_split(self): xml_file = Path("./tasks/unit_tests/testdata/secret.tar.gz/-go-src-datadog-agent-junit-out-base.xml") owners = read_owners(".github/CODEOWNERS") - self.assertEqual(junit.split_junitxml(xml_file.parent, xml_file, owners, []), 28) + self.assertEqual(junit.split_junitxml(xml_file.parent, xml_file, owners, []), 27) class TestGroupPerTag(unittest.TestCase): @@ -144,4 +144,4 @@ def test_e2e(self, mock_popen, mock_gitlab): mock_gitlab.return_value = mock_project junit.junit_upload_from_tgz("tasks/unit_tests/testdata/testjunit-tests_deb-x64-py3.tgz") mock_popen.assert_called() - self.assertEqual(mock_popen.call_count, 30) + self.assertEqual(mock_popen.call_count, 29) diff --git a/tasks/unit_tests/libs/common/git_tests.py b/tasks/unit_tests/libs/common/git_tests.py index dcfd884f76115..4a803a58b2758 100644 --- 
a/tasks/unit_tests/libs/common/git_tests.py +++ b/tasks/unit_tests/libs/common/git_tests.py @@ -1,11 +1,14 @@ import unittest from unittest.mock import MagicMock +from invoke import MockContext, Result + from tasks.libs.common.git import ( check_local_branch, check_uncommitted_changes, get_commit_sha, get_current_branch, + get_last_release_tag, get_staged_files, ) @@ -107,3 +110,71 @@ def test_get_commit_sha(self): f"git rev-parse {'--short ' if test['short'] else ''}HEAD", hide=True ) self.ctx_mock.run.reset_mock() + + +class TestGetLastTag(unittest.TestCase): + def test_ordered(self): + c = MockContext( + run={ + 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result( + "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.2\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3" + ) + } + ) + _, name = get_last_release_tag(c, "woof", "7.56.*") + self.assertEqual(name, "7.56.0-rc.3") + + def test_non_ordered(self): + c = MockContext( + run={ + 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result( + "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.11\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3" + ) + } + ) + _, name = get_last_release_tag(c, "woof", "7.56.*") + self.assertEqual(name, "7.56.0-rc.11") + + def test_suffix_lower(self): + c = MockContext( + run={ + 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result( + "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.2^{}\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3" + ) + } + ) + _, name = get_last_release_tag(c, "woof", "7.56.*") + self.assertEqual(name, "7.56.0-rc.3") + + def test_suffix_equal(self): + c = MockContext( + run={ + 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result( + "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.3^{}\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3" + ) + } + ) + commit, _ = get_last_release_tag(c, "woof", "7.56.*") + self.assertEqual(commit, "7c6777bb7add533a789c69293b59e3261711d330") + + def test_suffix_greater(self): + c = MockContext( + run={ + 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result( + "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.4^{}\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3" + ) + } + ) + _, name = get_last_release_tag(c, "woof", "7.56.*") + self.assertEqual(name, "7.56.0-rc.4") + + def test_only_release_tags(self): + c = MockContext( + run={ + 'git ls-remote -t https://github.com/DataDog/woof "7.57.*"': Result( + 
"43638bd55a74fd6ec51264cc7b3b1003d0b1c7ac\trefs/tags/7.57.0-dbm-mongo-1.5\ne01bcf3d12e6d6742b1fa8296882938c6dba9922\trefs/tags/7.57.0-devel\n6a5ad7fda590c7b8ba7036bca70dc8a0872e7afe\trefs/tags/7.57.0-devel^{}\n2c2eb2293cccd33100d7d930a59c136319942915\trefs/tags/7.57.0-installer-0.5.0-rc.1\n2c2eb2293cccd33100d7d930a59c136319942915\trefs/tags/7.57.0-installer-0.5.0-rc.2\n6a91fcca0ade9f77f08cd98d923a8d9ec18d7e8f\trefs/tags/7.57.0-installer-0.5.0-rc.3\n7e8ffc3de15f0486e6cb2184fa59f02da6ecfab9\trefs/tags/7.57.0-rc.1\nfa72fd12e3483a2d5957ea71fe01a8b1af376424\trefs/tags/7.57.0-rc.1^{}\n22587b746d6a0876cb7477b9b335e8573bdc3ac5\trefs/tags/7.57.0-rc.2\nd6c151a36487c3b54145ae9bf200f6c356bb9348\trefs/tags/7.57.0-rc.2^{}\n948ed4dd8c8cdf0aae467997086bb2229d4f1916\trefs/tags/7.57.0-rc.3\n259ed086a45960006e110622332cc8a39f9c6bb9\trefs/tags/7.57.0-rc.3^{}\na249f4607e5da894715a3e011dba8046b46678ed\trefs/tags/7.57.0-rc.4\n51a3b405a244348aec711d38e5810a6d88075b77\trefs/tags/7.57.0-rc.4^{}\n06519be707d6f24fb8265cde5a50cf0a66d5cb02\trefs/tags/7.57.0-rc.5\n7f43a5180446290f498742e68d8b28a75da04188\trefs/tags/7.57.0-rc.5^{}\n6bb640559e7626131290c63dab3959ba806c9886\trefs/tags/7.57.0-rc.6\nc5ed1f8b4734d31e94c2a83f307dbcb2b5a1faac\trefs/tags/7.57.0-rc.6^{}\n260697e624bb1d92ad306fdc301aab9b2975a627\trefs/tags/7.57.0-rc.7\n48617a0f56747e33b75d3dcf570bc2237726dc0e\trefs/tags/7.57.0-rc.7^{}\n5e11e104ff99b40b01ff2cfa702c0e4a465f98de\trefs/tags/7.57.1-beta-ndm-rdns-enrichment\n91c7c85d7c8fbb94421a90b273aea75630617eef\trefs/tags/7.57.1-beta-ndm-rdns-enrichment^{}\n3ad359da2894fa3de6e265c56dea8fabdb128454\trefs/tags/7.57.1-beta-ndm-rdns-enrichment2\n86683ad80578912014cc947dcf247ba020532403\trefs/tags/7.57.1-beta-ndm-rdns-enrichment2^{}" + ) + } + ) + _, name = get_last_release_tag(c, "woof", "7.57.*") + self.assertEqual(name, "7.57.0-rc.7") diff --git a/tasks/unit_tests/linter_tests.py b/tasks/unit_tests/linter_tests.py index 78ac5a323ee1d..f3e3d0c51d0ce 100644 --- a/tasks/unit_tests/linter_tests.py +++ b/tasks/unit_tests/linter_tests.py @@ -32,7 +32,7 @@ def test_without_wrapper_no_env(self): def test_without_wrapper_with_env(self): with open(self.test_file, "w") as f: f.write( - " - export DD_API_KEY=$(aws ssm get-parameter --region us-east-1 --name $API_KEY_ORG2_SSM_NAME --with-decryption --query Parameter.Value --out text" + " - DD_API_KEY=$(aws ssm get-parameter --region us-east-1 --name $API_KEY_ORG2 --with-decryption --query Parameter.Value --out text || exit $?; export DD_API_KEY" ) matched = linter.list_get_parameter_calls(self.test_file)[0] self.assertFalse(matched.with_wrapper) @@ -41,7 +41,7 @@ def test_without_wrapper_with_env(self): def test_with_wrapper_no_env(self): with open(self.test_file, "w") as f: f.write( - "export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh test.datadog-agent.datadog_api_key_org2)" + "DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh test.datadog-agent.datadog_api_key_org2) || exit $?; export DD_API_KEY" ) matched = linter.list_get_parameter_calls(self.test_file)[0] self.assertTrue(matched.with_wrapper) @@ -49,25 +49,26 @@ def test_with_wrapper_no_env(self): def test_with_wrapper_with_env(self): with open(self.test_file, "w") as f: - f.write("export DD_APP_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $APP_KEY_ORG2_SSM_NAME)") + f.write( + "DD_APP_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $APP_KEY_ORG2) || exit $?; export DD_APP_KEY" + ) matched = linter.list_get_parameter_calls(self.test_file) self.assertListEqual([], matched) def 
test_multi_match_windows(self): with open(self.test_file, "w") as f: f.write( - 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\aws_ssm_get_wrapper.ps1" "test.datadog-agent.datadog_api_key_org2)\n' - 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\aws_ssm_get wrapper.ps1" "$Env:MISSING_UNDERSCORE)\n' - '`DD_APP_KEY=$(& "$CI_PROJECT_DIR\tools\\ci\aws_ssm_get_wrapper.ps1" "bad.name")\n' - 'DD_APP=$(& "$CI_PROJECT_DIR\tools\\ci\aws_ssm_get_wrapper.ps1" "$Env:TEST")\n' + 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\fetch_secret.ps1" -parameterName test.datadog-agent.datadog_api_key_org2 -tempFile $tmpfile)\n' + 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\fetch secret.ps1" -parameterName "$Env:MISSING_UNDERSCORE" -tempFile $tmpfile)\n' + '`DD_APP_KEY=$(& "$CI_PROJECT_DIR\tools\\ci\fetch_secret.ps1" -parameterName "bad.name" -tempFile "$tmpfile")\n' + 'DD_APP=$(& "$CI_PROJECT_DIR\tools\\ci\fetch_secret.ps1" -parameterName "$Env:TEST" -tempFile $tmpfile)\n' ) matched = linter.list_get_parameter_calls(self.test_file) - self.assertEqual(3, len(matched)) + self.assertEqual(2, len(matched)) self.assertTrue(matched[0].with_wrapper) self.assertFalse(matched[0].with_env_var) - self.assertFalse(matched[1].standard) - self.assertTrue(matched[2].with_wrapper) - self.assertFalse(matched[2].with_env_var) + self.assertTrue(matched[1].with_wrapper) + self.assertFalse(matched[1].with_env_var) class TestGitlabChangePaths(unittest.TestCase): diff --git a/tasks/unit_tests/omnibus_tests.py b/tasks/unit_tests/omnibus_tests.py index 1d7438303eefd..1aeaf35e411f7 100644 --- a/tasks/unit_tests/omnibus_tests.py +++ b/tasks/unit_tests/omnibus_tests.py @@ -41,7 +41,7 @@ def _run_calls_to_string(mock_calls): 'CI_PIPELINE_ID': '', 'RELEASE_VERSION_7': 'nightly', 'S3_OMNIBUS_CACHE_BUCKET': 'omnibus-cache', - 'API_KEY_ORG2_SSM_NAME': 'api-key', + 'API_KEY_ORG2': 'api-key', }, clear=True, ) diff --git a/tasks/unit_tests/pipeline_tests.py b/tasks/unit_tests/pipeline_tests.py index a418bc1fbb67d..366fc3b54bb92 100644 --- a/tasks/unit_tests/pipeline_tests.py +++ b/tasks/unit_tests/pipeline_tests.py @@ -106,9 +106,9 @@ class TestCompareToItself(unittest.TestCase): "git remote set-url origin https://x-access-token:zidane@github.com/DataDog/datadog-agent.git": Result(), "git config --global user.name 'github-actions[bot]'": Result(), "git config --global user.email 'github-app[bot]@users.noreply.github.com'": Result(), - "git commit -m 'Compare to itself' --allow-empty": Result(), + "git commit -m 'Initial push of the compare/to branch' --allow-empty": Result(), "git push origin compare/Football/900284400": Result(), - "git commit -am 'Compare to itself'": Result(), + "git commit -am 'Commit to compare to itself'": Result(), "git checkout Football": Result(), "git branch -D compare/Football/900284400": Result(), "git push origin :compare/Football/900284400": Result(), diff --git a/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml b/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml index 3a5f0ec2d1bc7..74d4ee191925a 100644 --- a/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml +++ b/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml @@ -19,7 +19,6 @@ extensions: exporters: - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 - - gomod: go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 - gomod: go.opentelemetry.io/collector/exporter/nopexporter v0.104.0 - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 - gomod: 
go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 diff --git a/tasks/unit_tests/testdata/fake_gitlab-ci.yml b/tasks/unit_tests/testdata/fake_gitlab-ci.yml index a07aa2828d10c..06478a78e1dcb 100644 --- a/tasks/unit_tests/testdata/fake_gitlab-ci.yml +++ b/tasks/unit_tests/testdata/fake_gitlab-ci.yml @@ -174,15 +174,15 @@ variables: DATADOG_AGENT_EMBEDDED_PATH: /opt/datadog-agent/embedded DEB_GPG_KEY_ID: ad9589b7 DEB_GPG_KEY_NAME: "Datadog, Inc. Master key" - DEB_GPG_KEY_SSM_NAME: ci.datadog-agent.deb_signing_private_key_${DEB_GPG_KEY_ID} - DEB_SIGNING_PASSPHRASE_SSM_NAME: ci.datadog-agent.deb_signing_key_passphrase_${DEB_GPG_KEY_ID} + DEB_GPG_KEY: ci.datadog-agent.deb_signing_private_key_${DEB_GPG_KEY_ID} + DEB_SIGNING_PASSPHRASE: ci.datadog-agent.deb_signing_key_passphrase_${DEB_GPG_KEY_ID} RPM_GPG_KEY_ID: fd4bf915 RPM_GPG_KEY_NAME: "Datadog, Inc. RPM key" - RPM_GPG_KEY_SSM_NAME: ci.datadog-agent.rpm_signing_private_key_${RPM_GPG_KEY_ID} - RPM_SIGNING_PASSPHRASE_SSM_NAME: ci.datadog-agent.rpm_signing_key_passphrase_${RPM_GPG_KEY_ID} + RPM_GPG_KEY: ci.datadog-agent.rpm_signing_private_key_${RPM_GPG_KEY_ID} + RPM_SIGNING_PASSPHRASE: ci.datadog-agent.rpm_signing_key_passphrase_${RPM_GPG_KEY_ID} # docker.io authentication - DOCKER_REGISTRY_LOGIN_SSM_KEY: docker_hub_login - DOCKER_REGISTRY_PWD_SSM_KEY: docker_hub_pwd + DOCKER_REGISTRY_LOGIN: docker_hub_login + DOCKER_REGISTRY_PWD: docker_hub_pwd DOCKER_REGISTRY_URL: docker.io KITCHEN_INFRASTRUCTURE_FLAKES_RETRY: 2 CLANG_LLVM_VER: 12.0.1 diff --git a/tasks/unit_tests/version_tests.py b/tasks/unit_tests/version_tests.py index 9cc343cdbb2c7..2f6255c68310c 100644 --- a/tasks/unit_tests/version_tests.py +++ b/tasks/unit_tests/version_tests.py @@ -210,6 +210,48 @@ def test_next_version_promote_rc(self): self.assertEqual(new_version, expected_version) +class TestPreviousRCVersion(unittest.TestCase): + def test_non_rc(self): + version = Version(major=1, minor=1) + with self.assertRaises(RuntimeError): + version.previous_rc_version() + + def test_rc_1_no_patch(self): + version = Version(major=1, minor=1, rc=1) + with self.assertRaises(RuntimeError): + version.previous_rc_version() + + def test_rc_1(self): + version = Version(major=1, minor=1, patch=1, rc=1) + previous = str(version.previous_rc_version()) + self.assertEqual(previous, "1.1.1-devel") + + def test_rc_42(self): + version = Version(major=1, minor=1, patch=1, rc=42) + previous = str(version.previous_rc_version()) + self.assertEqual(previous, "1.1.1-rc.41") + + +class TestQALabel(unittest.TestCase): + expected = "1.2.0-qa" + + def test_minor_major(self): + v = Version(1, 2) + self.assertEqual(v.qa_label(), self.expected) + + def test_minor_major_patch(self): + v = Version(1, 2, patch=0) + self.assertEqual(v.qa_label(), self.expected) + + def test_minor_major_patch_devel(self): + v = Version(1, 2, devel=True) + self.assertEqual(v.qa_label(), self.expected) + + def test_minor_major_patch_rc(self): + v = Version(1, 2, rc=1) + self.assertEqual(v.qa_label(), self.expected) + + class TestQueryVersion(unittest.TestCase): @patch.dict(os.environ, {"BUCKET_BRANCH": "dev"}, clear=True) def test_on_dev_bucket(self): diff --git a/tasks/vscode.py b/tasks/vscode.py index e528d188e1c79..e792327343084 100644 --- a/tasks/vscode.py +++ b/tasks/vscode.py @@ -44,7 +44,7 @@ def setup(ctx, force=False): print(color_message("* Setting up tasks", Color.BOLD)) setup_tasks(ctx, force) print(color_message("* Setting up tests", Color.BOLD)) - setup_tasks(ctx, force) + setup_tests(ctx, force) 
print(color_message("* Setting up settings", Color.BOLD)) setup_settings(ctx, force) print(color_message("* Setting up launch settings", Color.BOLD)) diff --git a/tasks/winbuildscripts/Generate-OCIPackage.ps1 b/tasks/winbuildscripts/Generate-OCIPackage.ps1 index 274778d4a16b8..ee48f4badc9fe 100644 --- a/tasks/winbuildscripts/Generate-OCIPackage.ps1 +++ b/tasks/winbuildscripts/Generate-OCIPackage.ps1 @@ -1,8 +1,7 @@ Param( - [Parameter(Mandatory=$true,Position=0)] - [ValidateSet("datadog-agent", "datadog-installer")] - [String] - $package + [Parameter(Mandatory=$true)] + [string] $package, + [string] $version ) $omnibusOutput = "$($Env:REPO_ROOT)\omnibus\pkg\" @@ -11,10 +10,15 @@ if (-not (Test-Path C:\tools\datadog-package.exe)) { Write-Host "Downloading datadog-package.exe" (New-Object System.Net.WebClient).DownloadFile("https://dd-agent-omnibus.s3.amazonaws.com/datadog-package.exe", "C:\\tools\\datadog-package.exe") } -$rawAgentVersion = "{0}-1" -f (inv agent.version --url-safe --major-version 7) -Write-Host "Detected agent version ${rawAgentVersion}" +if ([string]::IsNullOrWhitespace($version)) { + $version = "{0}-1" -f (inv agent.version --url-safe --major-version 7) + Write-Host "Detected agent version ${version}" +} +if (-not $version.EndsWith("-1")) { + $version += "-1" +} -$packageName = "${package}-${rawAgentVersion}-windows-amd64.oci.tar" +$packageName = "${package}-${version}-windows-amd64.oci.tar" if (Test-Path $omnibusOutput\$packageName) { Remove-Item $omnibusOutput\$packageName @@ -23,9 +27,9 @@ if (Test-Path $omnibusOutput\$packageName) { # datadog-package takes a folder as input and will package everything in that, so copy the msi to its own folder Remove-Item -Recurse -Force C:\oci-pkg -ErrorAction SilentlyContinue New-Item -ItemType Directory C:\oci-pkg -Copy-Item (Get-ChildItem $omnibusOutput\${package}-${rawAgentVersion}-x86_64.msi).FullName -Destination C:\oci-pkg\${package}-${rawAgentVersion}-x86_64.msi +Copy-Item (Get-ChildItem $omnibusOutput\${package}-${version}-x86_64.msi).FullName -Destination C:\oci-pkg\${package}-${version}-x86_64.msi # The argument --archive-path ".\omnibus\pkg\datadog-agent-${version}.tar.gz" is currently broken and has no effects -& C:\tools\datadog-package.exe create --package $package --os windows --arch amd64 --archive --version $rawAgentVersion C:\oci-pkg +& C:\tools\datadog-package.exe create --package $package --os windows --arch amd64 --archive --version $version C:\oci-pkg -Move-Item ${package}-${rawAgentVersion}-windows-amd64.tar $omnibusOutput\$packageName +Move-Item ${package}-${version}-windows-amd64.tar $omnibusOutput\$packageName diff --git a/tasks/winbuildscripts/buildinstaller.bat b/tasks/winbuildscripts/buildinstaller.bat index a9d7b3378ea05..bd41ccad5dc2a 100644 --- a/tasks/winbuildscripts/buildinstaller.bat +++ b/tasks/winbuildscripts/buildinstaller.bat @@ -18,6 +18,8 @@ set OMNIBUS_BUILD=omnibus.build @rem It's not strictly needed, as we will only invoke the .cmd for the Datadog Installer in the invoke task build-installer, but it's a good practice to be consistent. 
set OMNIBUS_TARGET=installer set OMNIBUS_ARGS=%OMNIBUS_ARGS% --target-project %OMNIBUS_TARGET% +@rem Have to use arcane syntax to store AGENT_VERSION, see https://ss64.com/nt/for_cmd.html +FOR /F "tokens=*" %%g IN ('inv agent.version --url-safe --major-version 7') do (SET AGENT_VERSION=%%g) if DEFINED GOMODCACHE set OMNIBUS_ARGS=%OMNIBUS_ARGS% --go-mod-cache %GOMODCACHE% if DEFINED USE_S3_CACHING set OMNIBUS_ARGS=%OMNIBUS_ARGS% %USE_S3_CACHING% @@ -36,19 +38,18 @@ pip3 install -r requirements.txt inv -e %OMNIBUS_BUILD% %OMNIBUS_ARGS% --skip-deps --release-version %RELEASE_VERSION% || exit /b 1 inv -e msi.build-installer || exit /b 2 -Powershell -C "./tasks/winbuildscripts/Generate-OCIPackage.ps1 datadog-installer" +Powershell -C "./tasks/winbuildscripts/Generate-OCIPackage.ps1 -package 'datadog-installer'" REM show output package directories (for debugging) dir \omnibus-ruby\pkg\ - +dir C:\opt\datadog-installer\ dir %REPO_ROOT%\omnibus\pkg\ REM copy resulting packages to expected location for collection by gitlab. if not exist c:\mnt\omnibus\pkg\ mkdir c:\mnt\omnibus\pkg\ || exit /b 5 copy %REPO_ROOT%\omnibus\pkg\* c:\mnt\omnibus\pkg\ || exit /b 6 - -REM show output binary directories (for debugging) -dir C:\opt\datadog-installer\ +REM Save the installer.exe for bootstrapping +copy C:\opt\datadog-installer\datadog-installer.exe c:\mnt\omnibus\pkg\datadog-installer-%AGENT_VERSION%-1-x86_64.exe || exit /b 7 goto :EOF diff --git a/tasks/winbuildscripts/dobuild.bat b/tasks/winbuildscripts/dobuild.bat index 852903f987b50..6af97b5a3d3bf 100644 --- a/tasks/winbuildscripts/dobuild.bat +++ b/tasks/winbuildscripts/dobuild.bat @@ -58,7 +58,7 @@ if "%OMNIBUS_TARGET%" == "main" ( REM Build the OCI package for the Agent 7 only. if %MAJOR_VERSION% == 7 ( - Powershell -C "./tasks/winbuildscripts/Generate-OCIPackage.ps1 datadog-agent" + Powershell -C "./tasks/winbuildscripts/Generate-OCIPackage.ps1 -package 'datadog-agent'" ) popd diff --git a/tasks/winbuildscripts/extract-modcache.bat b/tasks/winbuildscripts/extract-modcache.bat index 6d516b4139311..562d6d5647257 100644 --- a/tasks/winbuildscripts/extract-modcache.bat +++ b/tasks/winbuildscripts/extract-modcache.bat @@ -33,7 +33,7 @@ if exist %MODCACHE_XZ_FILE% ( REM This shouldn't have any negative impact: since modules are REM stored per version and hash, files that get replaced will REM get replaced by the same files - Powershell -C "7z x %MODCACHE_TAR_FILE% -o%GOMODCACHE% -aoa -bt" + Powershell -C "7z x %MODCACHE_TAR_FILE% -o%GOMODCACHE%\cache -aoa -bt" @echo Modcache extracted ) else ( @echo %MODCACHE_XZ_FILE% not found, dependencies will be downloaded diff --git a/tasks/winbuildscripts/unittests.ps1 b/tasks/winbuildscripts/unittests.ps1 index 8323cdd0afe75..8d09afd956ae5 100644 --- a/tasks/winbuildscripts/unittests.ps1 +++ b/tasks/winbuildscripts/unittests.ps1 @@ -60,9 +60,14 @@ $err = $LASTEXITCODE # Ignore upload failures $ErrorActionPreference = "Continue" +$tmpfile = [System.IO.Path]::GetTempFileName() # 1. Upload coverage reports to Codecov -$Env:CODECOV_TOKEN=$(& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" $Env:CODECOV_TOKEN_SSM_NAME) +& "$UT_BUILD_ROOT\tools\ci\fetch_secret.ps1" -parameterName "$Env:CODECOV_TOKEN" -tempFile "$tmpfile" +If ($LASTEXITCODE -ne "0") { + exit $LASTEXITCODE +} +$Env:CODECOV_TOKEN=$(cat "$tmpfile") & inv -e coverage.upload-to-codecov $Env:COVERAGE_CACHE_FLAG # 2. 
Upload junit files @@ -70,10 +75,19 @@ $Env:CODECOV_TOKEN=$(& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" $Env:CO Get-ChildItem -Path "$UT_BUILD_ROOT" -Filter "junit-out-*.xml" -Recurse | ForEach-Object { Copy-Item -Path $_.FullName -Destination C:\mnt } -$Env:DATADOG_API_KEY=$(& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" $Env:API_KEY_ORG2_SSM_NAME) -$Env:GITLAB_TOKEN=$(& "$UT_BUILD_ROOT\tools\ci\aws_ssm_get_wrapper.ps1" $Env:GITLAB_TOKEN_SSM_NAME) -& inv -e junit-upload --tgz-path $Env:JUNIT_TAR +& "$UT_BUILD_ROOT\tools\ci\fetch_secret.ps1" -parameterName "$Env:API_KEY_ORG2" -tempFile "$tmpfile" +If ($LASTEXITCODE -ne "0") { + exit $LASTEXITCODE +} +$Env:DATADOG_API_KEY=$(cat "$tmpfile") +& "$UT_BUILD_ROOT\tools\ci\fetch_secret.ps1" -parameterName "$Env:GITLAB_TOKEN" -tempFile "$tmpfile" +If ($LASTEXITCODE -ne "0") { + exit $LASTEXITCODE +} +$Env:GITLAB_TOKEN=$(cat "$tmpfile") +Remove-Item "$tmpfile" +& inv -e junit-upload --tgz-path $Env:JUNIT_TAR if($err -ne 0){ Write-Host -ForegroundColor Red "test failed $err" [Environment]::Exit($err) diff --git a/test/benchmarks/util.go b/test/benchmarks/util.go index 7cfdfacc958d6..898ef5daad46b 100644 --- a/test/benchmarks/util.go +++ b/test/benchmarks/util.go @@ -10,7 +10,8 @@ import ( "math/rand" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" @@ -31,7 +32,7 @@ func TimeNowNano() float64 { // InitLogging inits default logger func InitLogging(level string) error { - err := config.SetupLogger(config.LoggerName("test"), level, "", "", false, true, false) + err := pkglogsetup.SetupLogger(pkglogsetup.LoggerName("test"), level, "", "", false, true, false, pkgconfigsetup.Datadog()) if err != nil { return fmt.Errorf("Unable to initiate logger: %s", err) } diff --git a/test/e2e/README.md b/test/e2e/README.md deleted file mode 100644 index 5bf55766a9b76..0000000000000 --- a/test/e2e/README.md +++ /dev/null @@ -1,122 +0,0 @@ -# End to End testing - -# ToC -- [How it works](#how-it-works) - * [Setup instance](#setup-instance) - * [Run instance](#run-instance) - * [Command line](#command-line) - * [AWS development](#aws-development) - * [Locally](#locally) -- [Argo workflow](#argo-workflow) - * [argo assertion](#argo-assertion) - * [argo container](#argo-container) -- [Upgrade](#upgrade---bump) - -# How it works - -There are 3 main directories: -- [argo-workflows](./argo-workflows) - Specification of the end to end testing - -- [containers](./containers) - Custom container images needed within the workflows - -- [scripts](./scripts) - - [`setup-instance`](./scripts/setup-instance) - Entrypoint and scripts dedicated for environments (locally, AWS dev, AWS gitlab) - - [`run-instance`](./scripts/run-instance) - Scripts executed in the argo-machine (locally, AWS instance) - -## `setup-instance` - - - -## `run-instance` - - - -## Command line - -### AWS development - -```bash -$ cd ${GOPATH}/src/github.com/DataDog/datadog-agent -$ aws-vault exec ${DEV} -- inv -e e2e-tests -t dev --agent-image datadog/agent-dev:master --dca-image datadog/cluster-agent-dev:master -``` - -### Locally (Linux only) - -```bash -$ inv -e e2e-tests -t local --agent-image datadog/agent-dev:master --dca-image datadog/cluster-agent-dev:master -``` - -# Argo workflow - -The argo documentation is available [here](https://argo-cd.readthedocs.io/en/stable/), there 
are a lot of examples [here](https://github.com/argoproj/argo/tree/master/examples) too. - -## Argo assertion - -To assert something in an argo workflow, you need to create a mongodb query: -```yaml -name: find-kubernetes-state-deployments -activeDeadlineSeconds: 200 -script: - image: mongo:3.6.3 - command: [mongo, "fake-datadog.default.svc.cluster.local/datadog"] - source: | - while (1) { - var nb = db.series.find({ - metric: "kubernetes_state.deployment.replicas_available", - tags: {$all: ["namespace:default", "deployment:fake-datadog"] }, - "points.0.1": { $eq: 1} }); - print("find: " + nb) - if (nb != 0) { - break; - } - prevNb = nb; - sleep(2000); - } -``` - -This is an infinite loop with a timeout set by `activeDeadlineSeconds: 200`. -The source is EOF to the command, equivalent to: -```bash -mongo "fake-datadog.default.svc.cluster.local/datadog" << EOF -while (1) -[...] -EOF -``` - -Try to maximise the usage of MongoDB query system without rewriting too much logic in JavaScript. - -See some examples [here](./containers/fake_datadog/README.md#find) - -To discover more MongoDB capabilities: -- [find](https://docs.mongodb.com/manual/tutorial/query-documents/) -- [aggregation](https://docs.mongodb.com/manual/aggregation/) - -## Argo container - -If you need to add a non existing public container in the workflow, create it in the [container directory](./containers). - -But, keep in mind this become an additional piece of software to maintain. - -# Upgrade - bump - -This section helps you to upgrade any part of the end to end testing. - -The current end to end testing pipeline relies on: -* [Argo](https://github.com/argoproj/argo) - -Upgrade Argo version by changing version in `test/e2e/scripts/run-instance/20-argo-download.sh` and setting new checksum value in `test/e2e/scripts/run-instance/argo.sha512sum` - -* [Kind](https://kind.sigs.k8s.io/) - -Upgrade Kind version by changing version in `test/e2e/scripts/run-instance/10-setup-kind.sh`. -By default Kind will use the latest stable Kubernetes known at the time of Kind release. - -* [Fedora CoreOS](https://getfedora.org/en/coreos?stream=stable) - -You don't need to update CoreOS version as the setup script (`test/e2e/scripts/setup-instance/00-entrypoint-[dev|gitlab].sh`) always uses the latest `stable` version by default. - -If needed, use the [ignition-linter](https://coreos.com/validate/) to validate any changes. 
diff --git a/test/e2e/argo-workflows/cspm-workflow.yaml b/test/e2e/argo-workflows/cspm-workflow.yaml deleted file mode 100644 index c124dbf807d74..0000000000000 --- a/test/e2e/argo-workflows/cspm-workflow.yaml +++ /dev/null @@ -1,121 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: argo-datadog-agent- -spec: - entrypoint: main - onExit: exit-handler - arguments: - parameters: - - name: datadog-agent-image-repository - - name: datadog-agent-image-tag - - name: datadog-agent-site - - name: datadog-cluster-agent-image-repository - - name: datadog-cluster-agent-image-tag - - name: ci_commit_short_sha - - name: ci_pipeline_id - - name: ci_job_id - volumes: - - name: datadog-agent-volume - hostPath: - path: /host/datadog-agent - - name: host-root-proc - hostPath: - path: /proc - templates: - - name: main - inputs: - parameters: - - name: datadog-agent-image-repository - - name: datadog-agent-image-tag - - name: datadog-agent-site - - name: datadog-cluster-agent-image-repository - - name: datadog-cluster-agent-image-tag - - name: ci_commit_short_sha - - name: ci_pipeline_id - - name: ci_job_id - steps: - - - name: start-fake-datadog - templateRef: - name: fake-datadog - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: start-dsd-hostname - templateRef: - name: dsd-hostname - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: start-datadog-agent - templateRef: - name: datadog-agent - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - name: agent-image-repository - value: "{{inputs.parameters.datadog-agent-image-repository}}" - - name: agent-image-tag - value: "{{inputs.parameters.datadog-agent-image-tag}}" - - name: cluster-agent-image-repository - value: "{{inputs.parameters.datadog-cluster-agent-image-repository}}" - - name: cluster-agent-image-tag - value: "{{inputs.parameters.datadog-cluster-agent-image-tag}}" - - name: site - value: "{{inputs.parameters.datadog-agent-site}}" - - name: dd-url - value: "" - - name: ci_commit_short_sha - value: "{{inputs.parameters.ci_commit_short_sha}}" - - name: ci_pipeline_id - value: "{{inputs.parameters.ci_pipeline_id}}" - - name: ci_job_id - value: "{{inputs.parameters.ci_job_id}}" - - name: remote_configuration_enabled - value: "false" - - name: networkmonitoring_enabled - value: "false" - - - - name: wait-datadog-agent - templateRef: - name: datadog-agent - template: wait - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: test-cspm-e2e - templateRef: - name: datadog-agent - template: test-cspm-e2e - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - name: site - value: "{{inputs.parameters.datadog-agent-site}}" - - - name: exit-handler - steps: - - - name: diagnose - template: diagnose - - - name: diagnose - steps: - - - name: diagnose-datadog-agent - templateRef: - name: datadog-agent - template: diagnose - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" diff --git a/test/e2e/argo-workflows/default-workflow.yaml b/test/e2e/argo-workflows/default-workflow.yaml deleted file mode 100644 index 9c9f54e30d89c..0000000000000 --- a/test/e2e/argo-workflows/default-workflow.yaml +++ /dev/null @@ -1,352 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: argo-datadog-agent- -spec: - entrypoint: main - onExit: exit-handler 
- arguments: - parameters: - - name: datadog-agent-image-repository - - name: datadog-agent-image-tag - - name: datadog-cluster-agent-image-repository - - name: datadog-cluster-agent-image-tag - - name: ci_commit_short_sha - - name: ci_pipeline_id - - name: ci_job_id - volumes: - - name: datadog-agent-volume - hostPath: - path: /host/datadog-agent - templates: - - name: main - inputs: - parameters: - - name: datadog-agent-image-repository - - name: datadog-agent-image-tag - - name: datadog-cluster-agent-image-repository - - name: datadog-cluster-agent-image-tag - - name: ci_commit_short_sha - - name: ci_pipeline_id - - name: ci_job_id - steps: - - - name: start-fake-datadog - templateRef: - name: fake-datadog - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: start-redis - templateRef: - name: redis - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: start-cpu-stress - templateRef: - name: cpu-stress - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: start-dsd-hostname - templateRef: - name: dsd-hostname - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: start-logs-hello-world - templateRef: - name: logs-hello-world - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: start-nginx - templateRef: - name: nginx - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: fake-dd-reset - templateRef: - name: fake-datadog - template: reset - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: start-datadog-agent - templateRef: - name: datadog-agent - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - name: agent-image-repository - value: "{{inputs.parameters.datadog-agent-image-repository}}" - - name: agent-image-tag - value: "{{inputs.parameters.datadog-agent-image-tag}}" - - name: dd-url - value: "http://fake-datadog.{{workflow.namespace}}.svc.cluster.local" - - name: site - value: "" - - name: cluster-agent-image-repository - value: "{{inputs.parameters.datadog-cluster-agent-image-repository}}" - - name: cluster-agent-image-tag - value: "{{inputs.parameters.datadog-cluster-agent-image-tag}}" - - name: ci_commit_short_sha - value: "{{inputs.parameters.ci_commit_short_sha}}" - - name: ci_pipeline_id - value: "{{inputs.parameters.ci_pipeline_id}}" - - name: ci_job_id - value: "{{inputs.parameters.ci_job_id}}" - - name: remote_configuration_enabled - value: "false" - - name: networkmonitoring_enabled - value: "false" - - - - name: wait-datadog-agent - templateRef: - name: datadog-agent - template: wait - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: start-busybox - templateRef: - name: busybox - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: test-datadog-agent - templateRef: - name: datadog-agent - template: test - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: test-redis - templateRef: - name: redis - template: test - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: test-cpu - templateRef: - name: cpu-stress - template: test - arguments: - parameters: - - name: 
namespace - value: "{{workflow.namespace}}" - - - name: test-dsd - templateRef: - name: dsd-hostname - template: test - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: test-nginx - templateRef: - name: nginx - template: test - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: test-busybox - templateRef: - name: busybox - template: test - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: stop-redis - templateRef: - name: redis - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: stop-nginx - templateRef: - name: nginx - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: no-more-redis - templateRef: - name: redis - template: no-more-metrics - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: no-more-nginx - templateRef: - name: nginx - template: no-more-metrics - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: exit-handler - steps: - - - name: delete - template: delete - when: "{{workflow.status}} == Succeeded" - - - name: diagnose - template: diagnose - when: "{{workflow.status}} != Succeeded" - - - name: delete - steps: - - - name: stop-datadog-agent - templateRef: - name: datadog-agent - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: stop-redis - templateRef: - name: redis - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: stop-cpu-stress - templateRef: - name: cpu-stress - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: stop-dsd-hostname - templateRef: - name: dsd-hostname - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: stop-logs-hello-world - templateRef: - name: logs-hello-world - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: stop-nginx - templateRef: - name: nginx - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: stop-fake-datadog - templateRef: - name: fake-datadog - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: stop-busybox - templateRef: - name: busybox - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: diagnose - steps: - - - name: diagnose-datadog-agent - templateRef: - name: datadog-agent - template: diagnose - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - name: diagnose-fake-datadog - templateRef: - name: fake-datadog - template: diagnose - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - name: diagnose-nginx - templateRef: - name: nginx - template: diagnose - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - name: diagnose-busybox - templateRef: - name: busybox - template: diagnose - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" diff --git a/test/e2e/argo-workflows/otlp-workflow.yaml b/test/e2e/argo-workflows/otlp-workflow.yaml deleted file mode 100644 index 9320d2ae9ad7c..0000000000000 --- 
a/test/e2e/argo-workflows/otlp-workflow.yaml +++ /dev/null @@ -1,156 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: argo-datadog-agent- -spec: - entrypoint: main - onExit: exit-handler - arguments: - parameters: - - name: datadog-agent-image-repository - - name: datadog-agent-image-tag - - name: datadog-cluster-agent-image-repository - - name: datadog-cluster-agent-image-tag - - name: ci_commit_short_sha - - name: ci_pipeline_id - - name: ci_job_id - volumes: - - name: datadog-agent-volume - hostPath: - path: /host/datadog-agent - templates: - - name: main - inputs: - parameters: - - name: datadog-agent-image-repository - - name: datadog-agent-image-tag - - name: datadog-cluster-agent-image-repository - - name: datadog-cluster-agent-image-tag - - name: ci_commit_short_sha - - name: ci_pipeline_id - - name: ci_job_id - steps: - - - name: start-fake-datadog - templateRef: - name: fake-datadog - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: start-otlp-test - templateRef: - name: otlp-test - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: fake-dd-reset - templateRef: - name: fake-datadog - template: reset - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: start-datadog-agent - templateRef: - name: datadog-agent - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - name: agent-image-repository - value: "{{inputs.parameters.datadog-agent-image-repository}}" - - name: agent-image-tag - value: "{{inputs.parameters.datadog-agent-image-tag}}" - - name: dd-url - value: "http://fake-datadog.{{workflow.namespace}}.svc.cluster.local" - - name: site - value: "" - - name: cluster-agent-image-repository - value: "{{inputs.parameters.datadog-cluster-agent-image-repository}}" - - name: cluster-agent-image-tag - value: "{{inputs.parameters.datadog-cluster-agent-image-tag}}" - - name: ci_commit_short_sha - value: "{{inputs.parameters.ci_commit_short_sha}}" - - name: ci_pipeline_id - value: "{{inputs.parameters.ci_pipeline_id}}" - - name: ci_job_id - value: "{{inputs.parameters.ci_job_id}}" - - name: remote_configuration_enabled - value: "false" - - name: networkmonitoring_enabled - value: "false" - - - - name: wait-datadog-agent - templateRef: - name: datadog-agent - template: wait - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: test-otlp - templateRef: - name: otlp-test - template: test - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: exit-handler - steps: - - - name: delete - template: delete - when: "{{workflow.status}} == Succeeded" - - - name: diagnose - template: diagnose - when: "{{workflow.status}} != Succeeded" - - - name: delete - steps: - - - name: stop-datadog-agent - templateRef: - name: datadog-agent - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: stop-otlp-test - templateRef: - name: otlp-test - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: diagnose - steps: - - - name: diagnose-datadog-agent - templateRef: - name: datadog-agent - template: diagnose - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - name: diagnose-otlp-test - templateRef: - name: otlp-test - template: diagnose - arguments: - 
parameters: - - name: namespace - value: "{{workflow.namespace}}" diff --git a/test/e2e/argo-workflows/templates/cpu-stress.yaml b/test/e2e/argo-workflows/templates/cpu-stress.yaml deleted file mode 100644 index e210d9aa0eaf2..0000000000000 --- a/test/e2e/argo-workflows/templates/cpu-stress.yaml +++ /dev/null @@ -1,175 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: WorkflowTemplate -metadata: - name: cpu-stress -spec: - templates: - - name: create - inputs: - parameters: - - name: namespace - resource: - action: apply - manifest: | - apiVersion: apps/v1 - kind: Deployment - metadata: - name: cpu-stress - namespace: {{inputs.parameters.namespace}} - spec: - replicas: 1 - selector: - matchLabels: - app: cpu-stress - template: - metadata: - labels: - app: cpu-stress - spec: - containers: - - name: cpu-stress - image: datadog/docker-library:progrium_stress - args: - - "--cpu" - - "2" - resources: - requests: - memory: "64Mi" - cpu: "1" - limits: - memory: "64Mi" - cpu: "1" - - - name: delete - inputs: - parameters: - - name: namespace - resource: - action: delete - manifest: | - apiVersion: apps/v1 - kind: Deployment - metadata: - name: cpu-stress - namespace: {{inputs.parameters.namespace}} - - - name: find-metrics-cpu-container-runtime - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - while (1) { - var nb = db.series.find({ - metric: "cri.cpu.usage", - tags: { $all: ["kube_deployment:cpu-stress", "kube_container_name:cpu-stress"] }, - "points.0.1": { $gt: 950000000, $lt: 1010000000 } }).count(); - print("find: " + nb) - if (nb != 0) { - print("cpu value in target range") - break; - } - sleep(2000); - } - - - name: find-metrics-cpu-kubelet - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - while (1) { - var nb = db.series.find({ - metric: "kubernetes.cpu.usage.total", - tags: { $all: ["kube_deployment:cpu-stress", "kube_container_name:cpu-stress"] }, - "points.0.1": { $gt: 800000000, $lt: 1200000000 } }).count(); - print("find: " + nb) - if (nb != 0) { - print("cpu value in target range") - break; - } - sleep(2000); - } - - - name: find-metrics-cpu-system - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - while (1) { - sleep(2000); - - // Determine the hostname the cpu-stress pod is running on - var point = db.series.find({ - metric: "kubernetes.cpu.usage.total", - tags: {$all: ["kube_deployment:cpu-stress", "kube_container_name:cpu-stress"]} - }).limit(1).sort({$natural:-1})[0]; - if (!point) { - print("cannot get hostname for pod"); - continue; - } - hostname = point.host; - - // Get the number of CPUs on that host - var point = db.series.find({ - metric: "kubernetes_state.node.cpu_capacity", - host: hostname - }).limit(1).sort({$natural:-1})[0]; - if (!point) { - print("cannot get cpu capacity for host " + hostname); - continue; - } - cpucount = point.points[0][1]; - print("cpu count: " + cpucount) - - // Get the user CPU usage, make sure it's above 39% non-normalized - var point = db.series.find({ - metric: "system.cpu.user", - host: hostname - }).limit(1).sort({$natural:-1})[0]; - if (!point) { - print("no 
system.cpu.usage metric reported for host " + hostname) - continue; - } - print("raw value: " + point.points[0][1]) - value = point.points[0][1] * cpucount; - print("cpu value: " + value) - if (value > 95) { - print("cpu value in target range"); - break; - } - } - - - name: test - inputs: - parameters: - - name: namespace - steps: - - - name: find-metrics-cpu-container-runtime - template: find-metrics-cpu-container-runtime - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: find-metrics-cpu-kubelet - template: find-metrics-cpu-kubelet - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: find-metrics-cpu-system - template: find-metrics-cpu-system - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" diff --git a/test/e2e/argo-workflows/templates/datadog-agent.yaml b/test/e2e/argo-workflows/templates/datadog-agent.yaml deleted file mode 100644 index 2c040444f3104..0000000000000 --- a/test/e2e/argo-workflows/templates/datadog-agent.yaml +++ /dev/null @@ -1,666 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: WorkflowTemplate -metadata: - name: datadog-agent -spec: - templates: - - name: create - inputs: - parameters: - - name: namespace - - name: dd-url - - name: site - - name: agent-image-repository - - name: agent-image-tag - - name: cluster-agent-image-repository - - name: cluster-agent-image-tag - - name: ci_commit_short_sha - - name: ci_pipeline_id - - name: ci_job_id - - name: remote_configuration_enabled - - name: networkmonitoring_enabled - script: - image: alpine/k8s:1.27.1 - envFrom: - - secretRef: - name: dd-keys - command: [sh] - source: | - set -euo pipefail - - cat > /tmp/values.yaml <& /dev/null - sleep 0.01 - done ) & - - until [[ "$(kubectl --namespace {{inputs.parameters.namespace}} get hpa nginxext -o jsonpath='{.status.currentReplicas}')" -gt 1 ]]; do - kubectl --namespace {{inputs.parameters.namespace}} describe hpa nginxext - sleep 1 - done - - - name: test - inputs: - parameters: - - name: namespace - dag: - tasks: - - name: find-kube-state-metrics - template: find-kube-state-metrics - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: find-metrics-nginx - template: find-metrics-nginx - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: validate-hpa - template: validate-hpa - dependencies: - - find-metrics-nginx - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: run-hpa - template: run-hpa - dependencies: - - validate-hpa - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - - name: no-more-metrics - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - var prevNb = -1; - while (1) { - var nb = db.series.find({ - metric: {$regex: "nginx*"} - }).count(); - - print("prev-find: " + prevNb) - print("find: " + nb) - if (nb == prevNb) { - break; - } - prevNb = nb; - sleep(30000); - } - - - name: describe-hpa - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: alpine/k8s:1.27.1 - command: [sh] - source: | - set -euo pipefail - - kubectl --namespace {{inputs.parameters.namespace}} describe hpa nginxext - - - name: diagnose - inputs: - parameters: - - name: namespace - steps: - - 
- name: describe-hpa - template: describe-hpa - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" diff --git a/test/e2e/argo-workflows/templates/otlp-test.yaml b/test/e2e/argo-workflows/templates/otlp-test.yaml deleted file mode 100644 index 9f9716ffd9fd2..0000000000000 --- a/test/e2e/argo-workflows/templates/otlp-test.yaml +++ /dev/null @@ -1,229 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: WorkflowTemplate -metadata: - name: otlp-test -spec: - templates: - - name: create-sender-config - inputs: - parameters: - - name: namespace - resource: - action: apply - manifest: | - apiVersion: v1 - kind: ConfigMap - metadata: - name: sender-config - namespace: {{inputs.parameters.namespace}} - data: - sender-config: |+ - receivers: - file: - path: /etc/data/metrics.data - loop: - enabled: true - period: 10s - exporters: - otlp: - endpoint: ${DD_AGENT_OTLP_ENDPOINT} - tls: - insecure: true - service: - pipelines: - metrics: - receivers: [file] - exporters: [otlp] - - name: create-metrics-data - inputs: - parameters: - - name: namespace - resource: - action: apply - manifest: | - apiVersion: v1 - kind: ConfigMap - metadata: - name: metrics-data - namespace: {{inputs.parameters.namespace}} - data: - metrics-data: |+ - {"resourceMetrics":[{"resource":{"attributes":[{"key":"telemetry.sdk.language","value":{"stringValue":"go"}},{"key":"telemetry.sdk.name","value":{"stringValue":"opentelemetry"}},{"key":"telemetry.sdk.version","value":{"stringValue":"1.0.0"}}]},"instrumentationLibraryMetrics":[{"instrumentationLibrary":{"name":"test-meter"},"metrics":[{"name":"an_important_metric","description":"Measures the cumulative epicness of the app","sum":{"dataPoints":[{"attributes":[{"key":"labelA","value":{"stringValue":"chocolate"}},{"key":"labelB","value":{"stringValue":"raspberry"}},{"key":"labelC","value":{"stringValue":"vanilla"}}],"startTimeUnixNano":"1637674530222121000","timeUnixNano":"1637674532223257300","asDouble":14}],"aggregationTemporality":"AGGREGATION_TEMPORALITY_CUMULATIVE","isMonotonic":true}},{"name":"test2.sendtodev.histogram","description":"IO read bytes","histogram":{"dataPoints":[{"attributes":[{"key":"labelA","value":{"stringValue":"chocolate"}},{"key":"labelB","value":{"stringValue":"raspberry"}},{"key":"labelC","value":{"stringValue":"vanilla"}}],"startTimeUnixNano":"1637674530222121000","timeUnixNano":"1637674532223257300","count":"42","sum":1541400,"bucketCounts":["14","0","14","0","0","14","0","0","0","0","0","0"],"explicitBounds":[5000,10000,25000,50000,100000,250000,500000,1000000,2500000,5000000,10000000]}],"aggregationTemporality":"AGGREGATION_TEMPORALITY_CUMULATIVE"}}]}],"schemaUrl":"https://opentelemetry.io/schemas/v1.4.0"}]} - {"resourceMetrics":[{"resource":{"attributes":[{"key":"telemetry.sdk.language","value":{"stringValue":"go"}},{"key":"telemetry.sdk.name","value":{"stringValue":"opentelemetry"}},{"key":"telemetry.sdk.version","value":{"stringValue":"1.0.0"}}]},"instrumentationLibraryMetrics":[{"instrumentationLibrary":{"name":"test-meter"},"metrics":[{"name":"an_important_metric","description":"Measures the cumulative epicness of the 
app","sum":{"dataPoints":[{"attributes":[{"key":"labelA","value":{"stringValue":"chocolate"}},{"key":"labelB","value":{"stringValue":"raspberry"}},{"key":"labelC","value":{"stringValue":"vanilla"}}],"startTimeUnixNano":"1637674530222121000","timeUnixNano":"1637674534223387200","asDouble":27}],"aggregationTemporality":"AGGREGATION_TEMPORALITY_CUMULATIVE","isMonotonic":true}},{"name":"test2.sendtodev.histogram","description":"IO read bytes","histogram":{"dataPoints":[{"attributes":[{"key":"labelA","value":{"stringValue":"chocolate"}},{"key":"labelB","value":{"stringValue":"raspberry"}},{"key":"labelC","value":{"stringValue":"vanilla"}}],"startTimeUnixNano":"1637674530222121000","timeUnixNano":"1637674534223387200","count":"81","sum":2972700,"bucketCounts":["27","0","27","0","0","27","0","0","0","0","0","0"],"explicitBounds":[5000,10000,25000,50000,100000,250000,500000,1000000,2500000,5000000,10000000]}],"aggregationTemporality":"AGGREGATION_TEMPORALITY_CUMULATIVE"}}]}],"schemaUrl":"https://opentelemetry.io/schemas/v1.4.0"}]} - - name: create-deployment - inputs: - parameters: - - name: namespace - resource: - action: apply - manifest: | - apiVersion: apps/v1 - kind: Deployment - metadata: - name: otlp-sender - namespace: {{inputs.parameters.namespace}} - spec: - replicas: 1 - selector: - matchLabels: - app: otlp-sender - template: - metadata: - labels: - app: otlp-sender - spec: - containers: - - name: sender - image: datadog/docker-library:e2e-otlp-sender_latest - resources: - requests: - memory: "32Mi" - cpu: "100m" - limits: - memory: "32Mi" - cpu: "100m" - env: - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: DD_AGENT_OTLP_ENDPOINT - value: http://$(DD_AGENT_HOST):4317 - volumeMounts: - - name: "sender-config" - mountPath: "/etc/otel" - - name: "metrics-data" - mountPath: "/etc/data" - volumes: - - name: "sender-config" - configMap: - name: "sender-config" - items: - - key: sender-config - path: config.yaml - - name: "metrics-data" - configMap: - name: "metrics-data" - items: - - key: metrics-data - path: metrics.data - - name: create - inputs: - parameters: - - name: namespace - steps: - - - name: sender-config - template: create-sender-config - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: metrics-data - template: create-metrics-data - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: deployment - template: create-deployment - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - - name: delete-deployment - inputs: - parameters: - - name: namespace - resource: - action: delete - manifest: | - apiVersion: apps/v1 - kind: Deployment - metadata: - name: otlp-sender - namespace: {{inputs.parameters.namespace}} - - name: delete-sender-config - inputs: - parameters: - - name: namespace - resource: - action: delete - manifest: | - apiVersion: v1 - kind: ConfigMap - metadata: - name: sender-config - namespace: {{inputs.parameters.namespace}} - - name: delete-metrics-data - inputs: - parameters: - - name: namespace - resource: - action: delete - manifest: | - apiVersion: v1 - kind: ConfigMap - metadata: - name: metrics-data - namespace: {{inputs.parameters.namespace}} - - name: delete - inputs: - parameters: - - name: namespace - steps: - - - name: deployment - template: delete-deployment - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: sender-config - template: 
delete-sender-config - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: metrics-data - template: delete-metrics-data - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - - name: test - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - while (1) { - sleep(2000); - - // Gauges - var nb = db.series.find({metric: "an_important_metric"}).count(); - if (nb == 0) { - print("no 'an_important_metric' metric found"); - continue; - } - - print("All good"); - break; - } - - name: diagnose - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: alpine/k8s:1.27.1 - command: [sh] - source: | - set -euo pipefail - - kubectl --namespace {{inputs.parameters.namespace}} get pods -l app=otlp-sender -o custom-columns=name:metadata.name --no-headers | while read -r po; do - kubectl --namespace {{inputs.parameters.namespace}} logs $po -c sender || true - done diff --git a/test/e2e/argo-workflows/templates/redis.yaml b/test/e2e/argo-workflows/templates/redis.yaml deleted file mode 100644 index df3cbf79ea615..0000000000000 --- a/test/e2e/argo-workflows/templates/redis.yaml +++ /dev/null @@ -1,366 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: WorkflowTemplate -metadata: - name: redis -spec: - templates: - - name: create-service - inputs: - parameters: - - name: namespace - resource: - action: apply - manifest: | - apiVersion: v1 - kind: Service - metadata: - name: redis - namespace: {{inputs.parameters.namespace}} - spec: - ports: - - port: 6379 - protocol: TCP - targetPort: 6379 - name: redis - selector: - app: redis - type: ClusterIP - - - name: create-deployment - inputs: - parameters: - - name: namespace - resource: - action: apply - manifest: | - apiVersion: apps/v1 - kind: Deployment - metadata: - name: redis - namespace: {{inputs.parameters.namespace}} - spec: - selector: - matchLabels: - app: redis - replicas: 1 - template: - metadata: - labels: - app: redis - annotations: - ad.datadoghq.com/redis.check_names: '["redisdb"]' - ad.datadoghq.com/redis.init_configs: '[{}]' - ad.datadoghq.com/redis.instances: '[{"host": "%%host%%", "port": "%%port%%"}]' - spec: - initContainers: - - name: useless - image: busybox:latest - command: - - /bin/true - resources: - requests: - memory: "32Mi" - cpu: "25m" - limits: - memory: "64Mi" - cpu: "50m" - containers: - - name: redis - image: redis - ports: - - name: redis - containerPort: 6379 - resources: - requests: - memory: "64Mi" - cpu: "50m" - limits: - memory: "128Mi" - cpu: "100m" - - - name: create-deployment-unready - inputs: - parameters: - - name: namespace - resource: - action: apply - manifest: | - apiVersion: apps/v1 - kind: Deployment - metadata: - name: redis-unready - namespace: {{inputs.parameters.namespace}} - spec: - replicas: 1 - selector: - matchLabels: - app: redis - template: - metadata: - labels: - app: redis - annotations: - ad.datadoghq.com/tolerate-unready: "true" - spec: - containers: - - name: redis-unready - image: redis - ports: - - name: redis - containerPort: 6379 - resources: - requests: - memory: "64Mi" - cpu: "50m" - limits: - memory: "128Mi" - cpu: "100m" - readinessProbe: - tcpSocket: - port: 8080 - initialDelaySeconds: 1 - periodSeconds: 1 - - - name: delete-service - inputs: - parameters: - - name: namespace - resource: - action: delete - manifest: | - 
apiVersion: v1 - kind: Service - metadata: - name: redis - namespace: {{inputs.parameters.namespace}} - - - name: delete-deployment - inputs: - parameters: - - name: namespace - resource: - action: delete - manifest: | - apiVersion: apps/v1 - kind: Deployment - metadata: - name: redis - namespace: {{inputs.parameters.namespace}} - - - name: delete-deployment-unready - inputs: - parameters: - - name: namespace - resource: - action: delete - manifest: | - apiVersion: apps/v1 - kind: Deployment - metadata: - name: redis-unready - namespace: {{inputs.parameters.namespace}} - - - name: create - inputs: - parameters: - - name: namespace - steps: - - - name: service - template: create-service - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: deployment - template: create-deployment - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: deployment-unready - template: create-deployment-unready - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - - name: delete - inputs: - parameters: - - name: namespace - steps: - - - name: service - template: delete-service - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: deployment - template: delete-deployment - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: deployment-unready - template: delete-deployment-unready - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - - name: find-kube-state-metrics - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - // This step is intended to test end-to-end scraping of prometheus metrics - // by asserting the value of a few simple metrics collected from the - // kubernetes_state integration. 
- - while (1) { - var nb = db.series.find({ - metric: "kubernetes_state.deployment.replicas_available", - tags: { $all: ["kube_namespace:{{inputs.parameters.namespace}}", "kube_deployment:redis"] }, - "points.0.1": { $eq: 1 } }).count(); - print("find: " + nb) - if (nb != 0) { - break; - } - sleep(2000); - } - - - name: find-metrics-redis - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - while (1) { - var nb = db.series.find({ - metric: {$regex: "redis*"} - }).count(); - - print("find: " + nb) - if (nb != 0) { - break; - } - sleep(2000); - } - - - name: find-metrics-redis-unready - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - while (1) { - var nb = db.series.find({ - metric: {$regex: "redis*"}, - tags: {$all: ["kube_deployment:redis-unready", "kube_container_name:redis-unready"]} - }).count(); - - print("find: " + nb) - if (nb != 0) { - break; - } - sleep(2000); - } - - - name: find-metrics-redis-tagged - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - while (1) { - var nb = db.series.find({ - metric: {$regex: "redis*"}, - tags: "kube_service:redis" - }).count(); - print("find: " + nb) - if (nb != 0) { - break; - } - sleep(2000); - } - - - name: test - inputs: - parameters: - - name: namespace - steps: - - - name: find-kube-state-metrics - template: find-kube-state-metrics - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: find-metrics-redis - template: find-metrics-redis - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: find-metrics-redis-unready - template: find-metrics-redis-unready - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: find-metrics-redis-tagged - template: find-metrics-redis-tagged - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - - name: no-more-metrics - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - var prevNb = -1; - while (1) { - var nb = db.series.find({ - metric: {$regex: "redis*"} - }).count(); - - print("prev-find: " + prevNb) - print("find: " + nb) - if (nb == prevNb) { - break; - } - prevNb = nb; - sleep(30000); - } - var prevNb = -1 - while (1) { - var nb = db.check_run.find({check: "datadog.agent.check_status", - tags: "check:redisdb", - status: {$ne: 0}}).count(); - - print("prev-find: " + prevNb) - print("find: " + nb) - if (nb == prevNb) { - break; - } - prevNb = nb; - sleep(30000); - } diff --git a/test/e2e/containers/dsd_sender/Dockerfile b/test/e2e/containers/dsd_sender/Dockerfile deleted file mode 100644 index 1b6a5ae33c311..0000000000000 --- a/test/e2e/containers/dsd_sender/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM datadog/docker-library:python_2_7-alpine3_6 - -RUN pip install datadog - -COPY sender.py /sender.py - -CMD [ "python", "/sender.py" ] diff --git a/test/e2e/containers/dsd_sender/Makefile 
b/test/e2e/containers/dsd_sender/Makefile deleted file mode 100644 index bfdc5e51e0272..0000000000000 --- a/test/e2e/containers/dsd_sender/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -TAG?=latest - -default: build push - -build: - docker build --force-rm -t datadog/docker-library:e2e-dsd-sender_$(TAG) . - -push: - docker push datadog/docker-library:e2e-dsd-sender_$(TAG) diff --git a/test/e2e/containers/dsd_sender/sender.py b/test/e2e/containers/dsd_sender/sender.py deleted file mode 100644 index a589f38a6fa7b..0000000000000 --- a/test/e2e/containers/dsd_sender/sender.py +++ /dev/null @@ -1,23 +0,0 @@ -import time - -import datadog - -client = datadog.dogstatsd.base.DogStatsd(socket_path="/var/run/dogstatsd/dsd.socket") - -while True: - # Nominal case, dsd will inject its hostname - client.gauge('dsd.hostname.e2e', 1, tags=["case:nominal"]) - client.service_check('dsd.hostname.e2e', 0, tags=["case:nominal"]) - client.event('dsd.hostname.e2e', 'text', tags=["case:nominal"]) - - # Force the hostname value - client.gauge('dsd.hostname.e2e', 1, tags=["case:forced", "host:forced"]) - client.service_check('dsd.hostname.e2e', 0, tags=["case:forced"], hostname="forced") - client.event('dsd.hostname.e2e', 'text', tags=["case:forced"], hostname="forced") - - # Force an empty hostname - client.gauge('dsd.hostname.e2e', 1, tags=["case:empty", "host:"]) - client.service_check('dsd.hostname.e2e', 0, tags=["case:empty", "host:"]) - client.event('dsd.hostname.e2e', 'text', tags=["case:empty", "host:"]) - - time.sleep(10) diff --git a/test/e2e/containers/fake_datadog/Dockerfile b/test/e2e/containers/fake_datadog/Dockerfile deleted file mode 100644 index 451b008e217c8..0000000000000 --- a/test/e2e/containers/fake_datadog/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM python:3.9-alpine - -COPY app /opt/fake_datadog - -RUN apk update && \ - apk add python3-dev gcc g++ musl-dev libstdc++ && \ - pip install -r /opt/fake_datadog/requirements.txt && \ - apk del python3-dev gcc g++ musl-dev && \ - rm -rf /var/cache/apk/* - -VOLUME /opt/fake_datadog/recorded - -ENV prometheus_multiproc_dir "/var/lib/prometheus" - -CMD ["gunicorn", "--bind", "0.0.0.0:80", "--pythonpath", "/opt/fake_datadog", "api:app"] diff --git a/test/e2e/containers/fake_datadog/Makefile b/test/e2e/containers/fake_datadog/Makefile deleted file mode 100644 index 27bcd71329f18..0000000000000 --- a/test/e2e/containers/fake_datadog/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -TAG?=$(shell date '+%Y%m%d') - -.PHONY: venv pip build default push multiarch - -default: pip - -venv: - virtualenv venv -p python3 - -pip: venv - venv/bin/pip install -r app/requirements.txt - -build: - docker build --force-rm -t datadog/fake-datadog:$(TAG) . - -multiarch: - docker buildx build --platform linux/amd64,linux/arm64 -t datadog/fake-datadog:$(TAG) . --push - -push: - docker push datadog/fake-datadog:$(TAG) diff --git a/test/e2e/containers/fake_datadog/README.md b/test/e2e/containers/fake_datadog/README.md deleted file mode 100644 index efaf81bc96624..0000000000000 --- a/test/e2e/containers/fake_datadog/README.md +++ /dev/null @@ -1,240 +0,0 @@ -# fake_datadog - - -Expose the needed API to make the agent submit payloads. - - -#### API - -Prefer using mongo. - -Get all series: -```bash -curl ${SERVICE_IP}/records/series | jq . -``` - -Get all check_run: -```bash -curl ${SERVICE_IP}/records/check_run | jq . -``` - -Get all intake: -```bash -curl ${SERVICE_IP}/records/intake | jq . 
-``` - -#### MongoDB - -Explore: -```bash -docker run --rm -it --net=host mongo mongo ${SERVICE_IP}/datadog -``` -```bash -apt-get install -yqq mongodb-clients && mongo ${SERVICE_IP}/datadog -``` -```bash -> show collections -check_run -intake -series - -``` - -#### Find - -Find a metric: -```text -> db.series.findOne() - -{ - "_id" : ObjectId("5ab3e567cd9a72000912abad"), - "metric" : "datadog.agent.running", - "points" : [ - [ - 1521739111, - 1 - ] - ], - "tags" : null, - "host" : "haf", - "type" : "gauge", - "interval" : 0, - "source_type_name" : "System" -} -``` - -Find a metric by metric name: -```text -db.series.findOne({metric: "kubernetes.network.tx_errors"}) - -{ - "_id" : ObjectId("5ab4cca8c914b50008c10615"), - "metric" : "kubernetes.network.tx_errors", - "points" : [ - [ - 1521798304, - 0 - ] - ], - "tags" : [ - "kube_deployment:workflow-controller", - "kube_namespace:kube-system", - "kube_replica_set:workflow-controller-58bbf49865", - "pod_name:workflow-controller-58bbf49865-55xdz" - ], - "host" : "v1704", - "type" : "gauge", - "interval" : 0, - "source_type_name" : "System" -} -``` - -Advanced find: -```js -db.series.find({ - metric: "kubernetes.cpu.usage.total", - tags: { $all: ["kube_namespace:kube-system", "pod_name:kube-controller-manager"] } -}, {_id: 0}) // .count() -``` - -#### Aggregation pipeline - -Aggregate all tags for a metric: -```js -db.series.aggregate([ - { $match: { metric: "kubernetes.cpu.usage.total"} }, - { $project: {tags: 1} }, - { $unwind: "$tags" }, - { $group: {_id: "allTags", tags: {$addToSet: "$tags" } } } -]) -``` - -Aggregate all tags for a metric regex: -```js -db.series.aggregate([ - { $match: { metric: {$regex: "kubernetes*"} } }, - { $project: {tags: 1} }, - { $unwind: "$tags" }, - { $group: {_id: "allTags", tags: {$addToSet: "$tags" } } } -]) -``` - -Aggregate all tags for each metric matched by a regex: -```js -db.series.aggregate([ - { $match: { metric: {$regex: "kubernetes*"} } }, - { $project: { metric: 1, tags: 1 } }, - { $unwind: "$tags" }, - { $group: {_id: "$metric", tags: {$addToSet: "$tags" } } } -]) -``` - -Aggregate all metrics from a tag: -```js -db.series.aggregate([ - { $match: { tags: "kube_deployment:fake-app-datadog"} }, - { $group: { _id: "kube_deployment:fake-app-datadog", metrics: { $addToSet: "$metric" } } } -]) -``` - -Aggregate all metrics from tags ($or || $and): -```js -db.series.aggregate([ - { $match: { $or: [ - {tags: "kube_deployment:fake-app-datadog"}, - {tags: "kube_service:fake-app-datadog"} - ] } }, - { $group: { _id: "metricsToTags", metrics: { $addToSet: "$metric" } } } -]) -``` - -Aggregate a metric and a tag as timeseries: -```js -db.series.aggregate([ - { $match: { tags: "kube_deployment:dd", metric: "kubernetes.cpu.usage.total"} }, - { $unwind: "$points" }, - { $project: { - _id: { $arrayElemAt: [ "$points", 0 ] }, - value: { $arrayElemAt: [ "$points", 1 ] }, - tags: "$tags" - } - }, - { $sort: { _id: 1 } } -]) -``` - -Count tag occurrences on a given metric: -```js -db.series.aggregate([ - { $match: { metric: "kubernetes.filesystem.usage", tags: { $all: ["pod_name:fake-app-datadog-7cfb79db4d-dd4jr"] } } }, - { $project: {tags: 1} }, - { $unwind: "$tags" }, - { $group: {_id: "$tags", count: { $sum: 1 } } }, - { $sort: {count: -1} } -]) -``` - -#### Use standalone - -This tool can be used as a debug proxy to inspect agent payloads. Here is how to do it for Kubernetes. - -##### K8S -- run the following from within this folder: - -```console -docker build -t fake-datadog:latest . 
-docker tag fake-datadog:latest -docker push -# replace in fake-datadog.yaml before running the next command -kubectl apply -f fake-datadog.yaml -``` - -- edit your Datadog Agent Daemonset to use the service deployed above as the Datadog API. Be aware that each agent has its own intake - configuring `DD_DD_URL` doesn't cover the logs agent for example. - -```yaml -... - env: - ... - - name: DD_DD_URL - # if you deployed the service & deployment in a separate namespace, add `..svc.cluster.local - value: "http://fake-datadog" -``` - -##### Docker - -1. Create a `agent-docker-compose-extra.yaml` file to override url and V2 series environment variables - -```yaml -services: - agent: # use your agent service name here - environment: - DD_DD_URL: "http://fake-datadog" - DD_USE_V2_API_SERIES: false -``` - -- `agent` is the docker service name used for Datadog Agent. Rename it if you are using another service id. -- `DD_DD_URL` overrides the URL for metric submission -- `DD_USE_V2_API_SERIES` force using v1 APIs - -2. Run `docker compose up` passing datadog agent compose, agent extra compose and fake datadog compose - -```bash -docker compose up -f "${PATH_TO_AGENT_COMPOSE}.yaml" -f "fake-datadog.yaml" -f "agent-docker-compose-extra.yaml" -``` - -3. Query `datadog` on `mongo` service, reachable from host at `localhost:27017` and from another container at `mongo:27017` - -##### VM - -1. Create `fake-datadog` compose - -```bash -docker compose up -f "fake-datadog.yaml" -``` - -2. Configure the agent to send requests to `fake-datadog` using `V1` endpoint passing following environment variables - -```txt -DD_DD_URL="http://fake-datadog" -DD_USE_V2_API_SERIES=false -``` diff --git a/test/e2e/containers/fake_datadog/app/api.py b/test/e2e/containers/fake_datadog/app/api.py deleted file mode 100644 index 37445ac6d0ae1..0000000000000 --- a/test/e2e/containers/fake_datadog/app/api.py +++ /dev/null @@ -1,334 +0,0 @@ -import json -import logging -import os -import sys -import zlib -from os import path - -import monitoring -import pymongo -from flask import Flask, Response, jsonify, request - -app = application = Flask("datadoghq") -monitoring.monitor_flask(app) -handler = logging.StreamHandler(sys.stderr) -app.logger.addHandler(handler) -app.logger.setLevel("INFO") - -record_dir = path.join(path.dirname(path.abspath(__file__)), "recorded") - - -def get_collection(name: str): - c = pymongo.MongoClient("127.0.0.1", 27017, connectTimeoutMS=5000) - db = c.get_database("datadog") - return db.get_collection(name) - - -payload_names = [ - "check_run", - "series", - "intake", - "logs", -] - - -def reset_records(): - for elt in payload_names: - to_remove = path.join(record_dir, elt) - if path.isfile(to_remove): - app.logger.warning("rm %s", to_remove) - os.remove(to_remove) - - try: - get_collection(elt).drop() - - except Exception as e: - app.logger.error(e) - - -def record_and_loads(filename: str, content_type: str, content_encoding: str, content: str): - """ - :param filename: - :param content_type: - :param content_encoding: - :param content: - :return: list or dict - """ - if content_type != "application/json": - app.logger.error("Unsupported content-type: %s", content_type) - raise TypeError(content_type) - - if content_encoding == "deflate": - content = zlib.decompress(content) - - content = content.decode() - content = f"{content}\n" if content[-1] != "\n" else content - with open(path.join(record_dir, filename), "a") as f: - f.write(content) - - return json.loads(content) - - -def patch_data(data, patch_key, 
patch_leaf): - if isinstance(data, dict): - return {patch_key(k): patch_data(v, patch_key, patch_leaf) for k, v in iter(data.items())} - elif isinstance(data, list): - return [patch_data(i, patch_key, patch_leaf) for i in data] - else: - return patch_leaf(data) - - -def fix_data(data): - return patch_data( - data, - # Whereas dot (.) and dollar ($) are valid characters inside a JSON dict key, - # they are not allowed as keys in a MongoDB BSON object. - # The official MongoDB documentation suggests to replace them with their - # unicode full width equivalent: - # https://docs.mongodb.com/v2.6/faq/developers/#dollar-sign-operator-escaping - patch_key=lambda x: x.translate(str.maketrans('.$', '\uff0e\uff04')), - # Values that cannot fit in a 64 bits integer must be represented as a float. - patch_leaf=lambda x: float(x) if isinstance(x, int) and x > 2**63 - 1 else x, - ) - - -def insert_series(data: dict): - coll = get_collection("series") - coll.insert_many(data["series"]) - - -def insert_intake(data: dict): - coll = get_collection("intake") - coll.insert_one(data) - - -def insert_check_run(data: list): - coll = get_collection("check_run") - coll.insert_many(data) - - -def insert_logs(data: list): - coll = get_collection("logs") - coll.insert_many(data) - - -def get_series_from_query(q: dict): - app.logger.info("Query is %s", q["query"]) - query = q["query"].replace("avg:", "") - first_open_brace, first_close_brace = query.index("{"), query.index("}") - - metric_name = query[:first_open_brace] - from_ts, to_ts = int(q["from"]), int(q["to"]) - - # tags - all_tags = query[first_open_brace + 1 : first_close_brace] - all_tags = all_tags.split(",") if all_tags else [] - - # group by - # TODO - last_open_brace, last_close_brace = query.rindex("{"), query.rindex("}") - group_by = query[last_open_brace + 1 : last_close_brace].split(",") # noqa: F841 - - match_conditions = [ - {"metric": metric_name}, - {"points.0.0": {"$gt": from_ts}}, - {"points.0.0": {"$lt": to_ts}}, - ] - if all_tags: - match_conditions.append({'tags': {"$all": all_tags}}) - - c = get_collection("series") - aggregate = [ - {"$match": {"$and": match_conditions}}, - {"$unwind": "$points"}, - {"$group": {"_id": "$metric", "points": {"$push": "$points"}}}, - {"$sort": {"points.0": 1}}, - ] - app.logger.info("Mongodb aggregate is %s", aggregate) - cur = c.aggregate(aggregate) - points_list = [] - for elt in cur: - for p in elt["points"]: - p[0] *= 1000 - points_list.append(p) - - result = { - "status": "ok", - "res_type": "time_series", - "series": [ - { - "metric": metric_name, - "attributes": {}, - "display_name": metric_name, - "unit": None, - "pointlist": points_list, - "end": points_list[-1][0] if points_list else 0.0, - "interval": 600, - "start": points_list[0][0] if points_list else 0.0, - "length": len(points_list), - "aggr": None, - "scope": "host:vagrant-ubuntu-trusty-64", # TODO - "expression": query, - } - ], - "from_date": from_ts, - "group_by": ["host"], - "to_date": to_ts, - "query": q["query"], - "message": "", - } - return result - - -@app.route("/api/v1/validate", methods=["GET"]) -def validate(): - return Response(status=200) - - -@app.route("/api/v1/query", methods=["GET"]) -def metrics_query(): - """ - Honor a query like documented here: - https://docs.datadoghq.com/api/?lang=bash#query-time-series-points - :return: - """ - if "query" not in request.args or "from" not in request.args or "to" not in request.args: - return Response(status=400) - - return jsonify(get_series_from_query(request.args)) - - 
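(For reference, the `/api/v1/query` handler above only understands the narrow `avg:metric{tags}by{group}` form that `get_series_from_query` parses. Below is a minimal sketch, not part of the deleted sources, of exercising that endpoint from the host once fake-datadog is running; it assumes the API is published on `localhost:8080`, matching the docker-compose port mapping shown further down.)

```python
# Minimal sketch: query the fake-datadog /api/v1/query endpoint.
# Assumes the service is reachable on localhost:8080 (docker-compose maps 8080:80).
import json
import time
import urllib.parse
import urllib.request

params = urllib.parse.urlencode({
    # get_series_from_query() strips "avg:", takes the metric name up to the first "{",
    # the tag filter between the first braces, and the group-by between the last braces.
    "query": "avg:kubernetes.cpu.usage.total{kube_namespace:kube-system}by{host}",
    "from": int(time.time()) - 3600,  # unix seconds, matches the points.0.0 range filter
    "to": int(time.time()),
})

with urllib.request.urlopen(f"http://localhost:8080/api/v1/query?{params}") as resp:
    body = json.load(resp)

# Each returned series carries a "pointlist" of [timestamp_ms, value] pairs.
for serie in body.get("series", []):
    print(serie["metric"], len(serie["pointlist"]), "points")
```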
-@app.route("/api/v1/series", methods=["POST"]) -def series(): - data = record_and_loads( - filename="series", - content_type=request.content_type, - content_encoding=request.content_encoding, - content=request.data, - ) - data = fix_data(data) - insert_series(data) - return Response(status=200) - - -@app.route("/api/v1/check_run", methods=["POST"]) -def check_run(): - data = record_and_loads( - filename="check_run", - content_type=request.content_type, - content_encoding=request.content_encoding, - content=request.data, - ) - data = fix_data(data) - insert_check_run(data) - return Response(status=200) - - -@app.route("/intake/", methods=["POST"]) -def intake(): - data = record_and_loads( - filename="intake", - content_type=request.content_type, - content_encoding=request.content_encoding, - content=request.data, - ) - data = fix_data(data) - insert_intake(data) - return Response(status=200) - - -@app.route("/v1/input/", methods=["POST"]) -def logs(): - data = record_and_loads( - filename="logs", - content_type=request.content_type, - content_encoding=request.content_encoding, - content=request.data, - ) - data = fix_data(data) - insert_logs(data) - return Response(status=200) - - -@app.route("/api/v2/orch", methods=["POST"]) -def orchestrator(): - # TODO - return Response(status=200) - - -@app.before_request -def logging(): - # use only if you need to check headers - # mind where the logs of this container go since headers contain an API key - # app.logger.info( - # "path: %s, method: %s, content-type: %s, content-encoding: %s, content-length: %s, headers: %s", - # request.path, request.method, request.content_type, request.content_encoding, request.content_length, request.headers) - app.logger.info( - "path: %s, method: %s, content-type: %s, content-encoding: %s, content-length: %s", - request.path, - request.method, - request.content_type, - request.content_encoding, - request.content_length, - ) - - -def stat_records(): - j = {} - for elt in payload_names: - try: - p = path.join(record_dir, elt) - st = os.stat(p) - lines = 0 - with open(p) as f: - for _ in f: - lines += 1 - j[elt] = {"size": st.st_size, "lines": lines} - - except FileNotFoundError: - j[elt] = {"size": -1, "lines": -1} - return j - - -@app.route("/_/records") -def available_records(): - return jsonify(stat_records()) - - -@app.route("/_/records/") -def get_records(name): - if name not in payload_names: - return Response(status=404) - - if path.isfile(path.join(record_dir, name)) is False: - return Response(status=503) - - payloads = [] - with open(path.join(record_dir, name)) as f: - for line in f: - payloads.append(json.loads(line)) - return json.dumps(payloads), 200 - - -@application.route('/', methods=['GET']) -def api_mapper(): - rules = [k.rule for k in application.url_map.iter_rules()] - rules = list(set(rules)) - rules.sort() - return jsonify(rules) - - -@application.route('/_/reset', methods=['POST']) -def reset(): - reset_records() - return jsonify(stat_records()) - - -@application.errorhandler(404) -def not_found(_): - app.logger.warning("404 %s %s", request.path, request.method) - return Response("404", status=404, mimetype="text/plain") - - -if __name__ == '__main__': - app.run(host="0.0.0.0", debug=True, port=5000) diff --git a/test/e2e/containers/fake_datadog/app/monitoring.py b/test/e2e/containers/fake_datadog/app/monitoring.py deleted file mode 100644 index 15ae0a1a9e3c8..0000000000000 --- a/test/e2e/containers/fake_datadog/app/monitoring.py +++ /dev/null @@ -1,74 +0,0 @@ -import os -import sys -import 
time - -from flask import Flask, Response, g, request -from prometheus_client import CONTENT_TYPE_LATEST, CollectorRegistry, Counter, Histogram, generate_latest, multiprocess - - -def extract_exception_name(exc_info=None): - """ - Function to get the exception name and module - :param exc_info: - :return: - """ - if not exc_info: - exc_info = sys.exc_info() - return f'{exc_info[0].__module__}.{exc_info[0].__name__}' - - -def monitor_flask(app: Flask): - """ - Add components to monitor each route with prometheus - The monitoring is available at /metrics - :param app: Flask application - :return: - """ - prometheus_state_dir = os.getenv('prometheus_multiproc_dir', "") - if "gunicorn" not in os.getenv("SERVER_SOFTWARE", "") and prometheus_state_dir == "": - return - - if os.path.isdir(prometheus_state_dir) is False: - os.mkdir(prometheus_state_dir) - - metrics = CollectorRegistry() - - def collect(): - registry = CollectorRegistry() - multiprocess.MultiProcessCollector(registry) - data = generate_latest(registry) - return Response(data, mimetype=CONTENT_TYPE_LATEST) - - app.add_url_rule('/metrics', 'metrics', collect) - - additional_kwargs = {'registry': metrics} - request_latency = Histogram( - 'requests_duration_seconds', 'Backend API request latency', ['method', 'path'], **additional_kwargs - ) - status_count = Counter( - 'responses_total', 'Backend API response count', ['method', 'path', 'status_code'], **additional_kwargs - ) - exception_latency = Histogram( - 'exceptions_duration_seconds', - 'Backend API top-level exception latency', - ['method', 'path', 'type'], - **additional_kwargs, - ) - - @app.before_request - def start_measure(): - g._start_time = time.time() - - @app.after_request - def count_status(response: Response): - status_count.labels(request.method, request.url_rule, response.status_code).inc() - request_latency.labels(request.method, request.url_rule).observe(time.time() - g._start_time) - return response - - # Override log_exception to increment the exception counter - def log_exception(exc_info): - class_name = extract_exception_name(exc_info) - exception_latency.labels(request.method, request.url_rule, class_name).observe(time.time() - g._start_time) - app.logger.error('Exception on %s [%s]', request.path, request.method, exc_info=exc_info) - - app.log_exception = log_exception diff --git a/test/e2e/containers/fake_datadog/app/requirements.txt b/test/e2e/containers/fake_datadog/app/requirements.txt deleted file mode 100644 index 146792ede7f30..0000000000000 --- a/test/e2e/containers/fake_datadog/app/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -Flask==2.1.2 -gunicorn==20.1.0 -pymongo==4.1.1 -prometheus-client==0.14.1 diff --git a/test/e2e/containers/fake_datadog/docker-compose.yaml b/test/e2e/containers/fake_datadog/docker-compose.yaml deleted file mode 100644 index eb4ff70532c8d..0000000000000 --- a/test/e2e/containers/fake_datadog/docker-compose.yaml +++ /dev/null @@ -1,13 +0,0 @@ -version: "3.9" -services: - fake-datadog: - image: "datadog/fake-datadog:20220621" - ports: - - "8080:80" - - "27017:27017" - container_name: fake-datadog - mongo: - image: "mongo:5.0" - container_name: mongo - network_mode: "service:fake-datadog" - diff --git a/test/e2e/containers/fake_datadog/fake-datadog.yaml b/test/e2e/containers/fake_datadog/fake-datadog.yaml deleted file mode 100644 index ceeceda9b3b60..0000000000000 --- a/test/e2e/containers/fake_datadog/fake-datadog.yaml +++ /dev/null @@ -1,46 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: fake-datadog -spec: - ports: 
- - port: 80 - protocol: TCP - targetPort: 80 - name: api - - port: 27017 - protocol: TCP - targetPort: 27017 - name: mongo - selector: - app: fake-datadog - type: ClusterIP - ---- - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: fake-datadog -spec: - replicas: 1 - selector: - matchLabels: - app: fake-datadog - strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - template: - metadata: - labels: - app: fake-datadog - spec: - containers: - - name: api - image: - imagePullPolicy: Always - - name: mongo - image: mongo:3.6.3 - diff --git a/test/e2e/containers/otlp_sender/Dockerfile b/test/e2e/containers/otlp_sender/Dockerfile deleted file mode 100644 index 5613d30c6c642..0000000000000 --- a/test/e2e/containers/otlp_sender/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM alpine:latest as prep -RUN apk --update add ca-certificates - -FROM scratch -ARG USER_UID=10001 -USER ${USER_UID} -COPY --from=prep /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt -COPY otlpsender / -EXPOSE 4317 55680 55679 -ENTRYPOINT ["/otlpsender"] -CMD ["--config", "/etc/otel/config.yaml"] diff --git a/test/e2e/containers/otlp_sender/Makefile b/test/e2e/containers/otlp_sender/Makefile deleted file mode 100644 index 880e85dfca46c..0000000000000 --- a/test/e2e/containers/otlp_sender/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -TAG?=latest - -default: push - -otlpsender: - GOOS=linux GOARCH=amd64 go build -o $@ ./cmd/sender - -docker-build: otlpsender - docker build --force-rm -t datadog/docker-library:e2e-otlp-sender_$(TAG) . - -push: docker-build - docker push datadog/docker-library:e2e-otlp-sender_$(TAG) diff --git a/test/e2e/containers/otlp_sender/cmd/sender/main.go b/test/e2e/containers/otlp_sender/cmd/sender/main.go deleted file mode 100644 index 242b6ef83f01d..0000000000000 --- a/test/e2e/containers/otlp_sender/cmd/sender/main.go +++ /dev/null @@ -1,79 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2021-present Datadog, Inc. 
- -// Program otlp_sender sends telemetry data defined in a given file -package main - -import ( - "log" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/loggingexporter" - "go.opentelemetry.io/collector/exporter/otlpexporter" - "go.opentelemetry.io/collector/exporter/otlphttpexporter" - "go.opentelemetry.io/collector/extension" - "go.opentelemetry.io/collector/otelcol" - "go.opentelemetry.io/collector/processor" - "go.opentelemetry.io/collector/receiver" - "go.uber.org/multierr" - - "github.com/DataDog/datadog-agent/tests/e2e/containers/otlp_sender/internal/filereceiver" -) - -func components() ( - otelcol.Factories, - error, -) { - var errs error - - extensions, err := extension.MakeFactoryMap() - errs = multierr.Append(errs, err) - - receivers, err := receiver.MakeFactoryMap( - filereceiver.NewFactory(), - ) - errs = multierr.Append(errs, err) - - exporters, err := exporter.MakeFactoryMap( - otlpexporter.NewFactory(), - otlphttpexporter.NewFactory(), - loggingexporter.NewFactory(), - ) - errs = multierr.Append(errs, err) - - processors, err := processor.MakeFactoryMap() - errs = multierr.Append(errs, err) - - factories := otelcol.Factories{ - Extensions: extensions, - Receivers: receivers, - Processors: processors, - Exporters: exporters, - } - - return factories, errs -} - -func main() { - factories, err := components() - if err != nil { - log.Fatalf("failed to build components: %v", err) - } - - cmd := otelcol.NewCommand(otelcol.CollectorSettings{ - BuildInfo: component.BuildInfo{ - Command: "otlpsender", - Description: "OpenTelemetry test sender", - Version: "latest", - }, - Factories: func() (otelcol.Factories, error) { - return factories, nil - }, - }) - if err := cmd.Execute(); err != nil { - log.Fatalf("collector server run finished with error: %v", err) - } -} diff --git a/test/e2e/containers/otlp_sender/go.mod b/test/e2e/containers/otlp_sender/go.mod deleted file mode 100644 index 13a7945ebe571..0000000000000 --- a/test/e2e/containers/otlp_sender/go.mod +++ /dev/null @@ -1,109 +0,0 @@ -module github.com/DataDog/datadog-agent/tests/e2e/containers/otlp_sender - -go 1.22.0 - -require ( - go.opentelemetry.io/collector/component v0.104.0 - go.opentelemetry.io/collector/consumer v0.104.0 - go.opentelemetry.io/collector/exporter v0.104.0 - go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 - go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 - go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 - go.opentelemetry.io/collector/extension v0.104.0 - go.opentelemetry.io/collector/otelcol v0.104.0 - go.opentelemetry.io/collector/pdata v1.11.0 - go.opentelemetry.io/collector/processor v0.104.0 - go.opentelemetry.io/collector/receiver v0.104.0 - go.uber.org/multierr v1.11.0 - go.uber.org/zap v1.27.0 -) - -require ( - github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/uuid v1.6.0 // indirect - 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/hashicorp/go-version v1.7.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.9 // indirect - github.com/knadh/koanf/maps v0.1.1 // indirect - github.com/knadh/koanf/providers/confmap v0.1.0 // indirect - github.com/knadh/koanf/v2 v2.1.1 // indirect - github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/mostynb/go-grpc-compression v1.2.3 // indirect - github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/client_golang v1.19.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.54.0 // indirect - github.com/prometheus/procfs v0.15.0 // indirect - github.com/rs/cors v1.10.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.5 // indirect - github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/cobra v1.8.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect - github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.104.0 // indirect - go.opentelemetry.io/collector/config/configauth v0.104.0 // indirect - go.opentelemetry.io/collector/config/configcompression v1.11.0 // indirect - go.opentelemetry.io/collector/config/configgrpc v0.104.0 // indirect - go.opentelemetry.io/collector/config/confighttp v0.104.0 // indirect - go.opentelemetry.io/collector/config/confignet v0.104.0 // indirect - go.opentelemetry.io/collector/config/configopaque v1.11.0 // indirect - go.opentelemetry.io/collector/config/configretry v1.11.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect - go.opentelemetry.io/collector/config/configtls v0.104.0 // indirect - go.opentelemetry.io/collector/config/internal v0.104.0 // indirect - go.opentelemetry.io/collector/confmap v0.104.0 // indirect - go.opentelemetry.io/collector/connector v0.104.0 // indirect - go.opentelemetry.io/collector/extension/auth v0.104.0 // indirect - go.opentelemetry.io/collector/featuregate v1.11.0 // indirect - go.opentelemetry.io/collector/semconv v0.104.0 // indirect - go.opentelemetry.io/collector/service v0.104.0 // indirect - go.opentelemetry.io/contrib/config v0.7.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect - go.opentelemetry.io/otel v1.27.0 // indirect - go.opentelemetry.io/otel/bridge/opencensus v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect - 
go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect - go.opentelemetry.io/otel/sdk v1.27.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect - go.opentelemetry.io/otel/trace v1.27.0 // indirect - go.opentelemetry.io/proto/otlp v1.2.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 // indirect - google.golang.org/grpc v1.64.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/test/e2e/containers/otlp_sender/go.sum b/test/e2e/containers/otlp_sender/go.sum deleted file mode 100644 index 7ab785e653595..0000000000000 --- a/test/e2e/containers/otlp_sender/go.sum +++ /dev/null @@ -1,344 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 
h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= -github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= -github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= -github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= -github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= -github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= -github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= -github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mostynb/go-grpc-compression v1.2.3 h1:42/BKWMy0KEJGSdWvzqIyOZ95YcR9mLPqKctH7Uo//I= -github.com/mostynb/go-grpc-compression v1.2.3/go.mod h1:AghIxF3P57umzqM9yz795+y1Vjs47Km/Y2FE6ouQ7Lg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
-github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= -github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= -github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek= -github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= -github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.5 h1:gGsArG5K6vmsh5hcFOHaPm87UD003CaDMkAOweSQjhM= -github.com/shirou/gopsutil/v4 v4.24.5/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= -github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= -github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod 
h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= -github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.104.0 h1:R3zjM4O3K3+ttzsjPV75P80xalxRbwYTURlK0ys7uyo= -go.opentelemetry.io/collector v0.104.0/go.mod h1:Tm6F3na9ajnOm6I5goU9dURKxq1fSBK1yA94nvUix3k= -go.opentelemetry.io/collector/component v0.104.0 h1:jqu/X9rnv8ha0RNZ1a9+x7OU49KwSMsPbOuIEykHuQE= -go.opentelemetry.io/collector/component v0.104.0/go.mod h1:1C7C0hMVSbXyY1ycCmaMUAR9fVwpgyiNQqxXtEWhVpw= -go.opentelemetry.io/collector/config/configauth v0.104.0 h1:ULtjugImijpKuLgGVt0E0HwiZT7+uDUEtMquh1ODB24= -go.opentelemetry.io/collector/config/configauth v0.104.0/go.mod h1:Til+nLLrQwwhgmfcGTX4ZRcNuMhdaWhBW1jH9DLTabQ= -go.opentelemetry.io/collector/config/configcompression v1.11.0 h1:oTwbcLh7mWHSDUIZXkRJVdNAMoBGS39XF68goTMOQq8= -go.opentelemetry.io/collector/config/configcompression v1.11.0/go.mod h1:6+m0GKCv7JKzaumn7u80A2dLNCuYf5wdR87HWreoBO0= -go.opentelemetry.io/collector/config/configgrpc v0.104.0 h1:E3RtqryQPOm/trJmhlJZj6cCqJNKgv9fOEQvSEpzsFM= -go.opentelemetry.io/collector/config/configgrpc v0.104.0/go.mod h1:tu3ifnJ5pv+4rZcaqNWfvVLjNKb8icSPoClN3THN8PU= -go.opentelemetry.io/collector/config/confighttp v0.104.0 h1:KSY0FSHSjuPyrR6iA2g5oFTozYFpYcy0ssJny8gTNTQ= -go.opentelemetry.io/collector/config/confighttp v0.104.0/go.mod h1:YgSXwuMYHANzzv+IBjHXaBMG/4G2mrseIpICHj+LB3U= -go.opentelemetry.io/collector/config/confignet v0.104.0 h1:i7AOTJf4EQox3SEt1YtQFQR+BwXr3v5D9x3Ai9/ovy8= -go.opentelemetry.io/collector/config/confignet v0.104.0/go.mod h1:pfOrCTfSZEB6H2rKtx41/3RN4dKs+X2EKQbw3MGRh0E= -go.opentelemetry.io/collector/config/configopaque v1.11.0 h1:Pt06PXWVmRaiSX63mzwT8Z9SV/hOc6VHNZbfZ10YY4o= -go.opentelemetry.io/collector/config/configopaque v1.11.0/go.mod h1:0xURn2sOy5j4fbaocpEYfM97HPGsiffkkVudSPyTJlM= -go.opentelemetry.io/collector/config/configretry v1.11.0 h1:UdEDD0ThxPU7+n2EiKJxVTvDCGygXu9hTfT6LOQv9DY= -go.opentelemetry.io/collector/config/configretry v1.11.0/go.mod h1:P+RA0IA+QoxnDn4072uyeAk1RIoYiCbxYsjpKX5eFC4= -go.opentelemetry.io/collector/config/configtelemetry v0.104.0 h1:eHv98XIhapZA8MgTiipvi+FDOXoFhCYOwyKReOt+E4E= -go.opentelemetry.io/collector/config/configtelemetry v0.104.0/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40= -go.opentelemetry.io/collector/config/configtls v0.104.0 h1:bMmLz2+r+REpO7cDOR+srOJHfitqTZfSZCffDpKfwWk= -go.opentelemetry.io/collector/config/configtls v0.104.0/go.mod h1:e33o7TWcKfe4ToLFyGISEPGMgp6ezf3yHRGY4gs9nKk= -go.opentelemetry.io/collector/config/internal v0.104.0 h1:h3OkxTfXWWrHRyPEGMpJb4fH+54puSBuzm6GQbuEZ2o= -go.opentelemetry.io/collector/config/internal v0.104.0/go.mod h1:KjH43jsAUFyZPeTOz7GrPORMQCK13wRMCyQpWk99gMo= -go.opentelemetry.io/collector/confmap v0.104.0 h1:d3yuwX+CHpoyCh0iMv3rqb/vwAekjSm4ZDL6UK1nZSA= -go.opentelemetry.io/collector/confmap v0.104.0/go.mod h1:F8Lue+tPPn2oldXcfqI75PPMJoyzgUsKVtM/uHZLA4w= -go.opentelemetry.io/collector/connector v0.104.0 h1:Y82ytwZZ+EruWafEebO0dgWMH+TdkcSONEqZ5bm9JYA= -go.opentelemetry.io/collector/connector v0.104.0/go.mod h1:78SEHel3B3taFnSBg/syW4OV9aU1Ec9KjgbgHf/L8JA= 
-go.opentelemetry.io/collector/consumer v0.104.0 h1:Z1ZjapFp5mUcbkGEL96ljpqLIUMhRgQQpYKkDRtxy+4= -go.opentelemetry.io/collector/consumer v0.104.0/go.mod h1:60zcIb0W9GW0z9uJCv6NmjpSbCfBOeRUyrtEwqK6Hzo= -go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBLd+JxEtAWo7JNbg= -go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ= -go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0 h1:MaBTuHmK/HAQ+/rLTrGf3tazKum8Sic3/CaXgNr5xnc= -go.opentelemetry.io/collector/exporter/loggingexporter v0.104.0/go.mod h1:sXZhACvds6z71cf2fzKrojMgdJItJZxeClKlF/PI/l8= -go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 h1:EFOdhnc2yGhqou0Tud1HsM7fsgWo/H3tdQhYYytDprQ= -go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0/go.mod h1:fAF7Q3Xh0OkxYWUycdrNNDXkyz3nhHIRKDkez0aQ6zg= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 h1:JkNCOj7DdyJhcYIaRqtS/X+YtAPRjE4pcruyY6LoM7c= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0/go.mod h1:6rs4Xugs7tIC3IFbAC+fj56zLiVc7osXC5UTjk/Mkw4= -go.opentelemetry.io/collector/extension v0.104.0 h1:bftkgFMKya/QIwK+bOxEAPVs/TvTez+s1mlaiUznJkA= -go.opentelemetry.io/collector/extension v0.104.0/go.mod h1:x7K0KyM1JGrtLbafEbRoVp0VpGBHpyx9hu87bsja6S4= -go.opentelemetry.io/collector/extension/auth v0.104.0 h1:SelhccGCrqLThPlkbv6lbAowHsjgOTAWcAPz085IEC4= -go.opentelemetry.io/collector/extension/auth v0.104.0/go.mod h1:s3/C7LTSfa91QK0JPMTRIvH/gCv+a4DGiiNeTAX9OhI= -go.opentelemetry.io/collector/extension/zpagesextension v0.104.0 h1:rJ9Sw6DR27s6bW7lWBjJhjth5CXpltAHBKIgUFgVwFs= -go.opentelemetry.io/collector/extension/zpagesextension v0.104.0/go.mod h1:85Exj8r237PIvaXL1a/S0KeVNnm3kQNpVXtu0O2Zk5k= -go.opentelemetry.io/collector/featuregate v1.11.0 h1:Z7puIymKoQRm3oNM/NH8reWc2zRPz2PNaJvuokh0lQY= -go.opentelemetry.io/collector/featuregate v1.11.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= -go.opentelemetry.io/collector/otelcol v0.104.0 h1:RnMx7RaSFmX4dq/l3wbXWwcUnFK7RU19AM/0FbMr0Ig= -go.opentelemetry.io/collector/otelcol v0.104.0/go.mod h1:hWFRiHIKT3zbUx6SRevusPRa6mfm+70bPG5CK0glqSU= -go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE= -go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE= -go.opentelemetry.io/collector/pdata/pprofile v0.104.0 h1:MYOIHvPlKEJbWLiBKFQWGD0xd2u22xGVLt4jPbdxP4Y= -go.opentelemetry.io/collector/pdata/pprofile v0.104.0/go.mod h1:7WpyHk2wJZRx70CGkBio8klrYTTXASbyIhf+rH4FKnA= -go.opentelemetry.io/collector/pdata/testdata v0.104.0 h1:BKTZ7hIyAX5DMPecrXkVB2e86HwWtJyOlXn/5vSVXNw= -go.opentelemetry.io/collector/pdata/testdata v0.104.0/go.mod h1:3SnYKu8gLfxURJMWS/cFEUFs+jEKS6jvfqKXnOZsdkQ= -go.opentelemetry.io/collector/processor v0.104.0 h1:KSvMDu4DWmK1/k2z2rOzMtTvAa00jnTabtPEK9WOSYI= -go.opentelemetry.io/collector/processor v0.104.0/go.mod h1:qU2/xCCYdvVORkN6aq0H/WUWkvo505VGYg2eOwPvaTg= -go.opentelemetry.io/collector/receiver v0.104.0 h1:URL1ExkYYd+qbndm7CdGvI2mxzsv/pNfmwJ+1QSQ9/o= -go.opentelemetry.io/collector/receiver v0.104.0/go.mod h1:+enTCZQLf6dRRANWvykXEzrlRw2JDppXJtoYWd/Dd54= -go.opentelemetry.io/collector/semconv v0.104.0 h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k= -go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/collector/service v0.104.0 h1:DTpkoX4C6qiA3v3cfB2cHv/cH705o5JI9J3P77SFUrE= -go.opentelemetry.io/collector/service v0.104.0/go.mod 
h1:eq68zgpqRDYaVp60NeRu973J0rA5vZJkezfw/EzxLXc= -go.opentelemetry.io/contrib/config v0.7.0 h1:b1rK5tGTuhhPirJiMxOcyQfZs76j2VapY6ODn3b2Dbs= -go.opentelemetry.io/contrib/config v0.7.0/go.mod h1:8tdiFd8N5etOi3XzBmAoMxplEzI3TcL8dU5rM5/xcOQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E= -go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E= -go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 h1:bFgvUr3/O4PHj3VQcFEuYKvRZJX1SJDQ+11JXuSB3/w= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0/go.mod h1:xJntEd2KL6Qdg5lwp97HMLQDVeAhrYxmzFseAMDPQ8I= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 h1:CIHWikMsN3wO+wq1Tp5VGdVRTcON+DmOJSfDjXypKOc= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0/go.mod h1:TNupZ6cxqyFEpLXAZW7On+mLFL0/g0TE3unIYL91xWc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0/go.mod h1:KfQ1wpjf3zsHjzP149P4LyAwWRupc6c7t1ZJ9eXpKQM= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9FBx6cJaIC5hYx5Fe64nC8w5Cylt/0= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric 
v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= -go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= 
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= -gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine 
v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 h1:P8OJ/WCl/Xo4E4zoe4/bifHpSmmKwARqyqE4nW6J2GQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 h1:Q2RxlXqh1cgzzUgV261vBO2jI5R/3DD1J2pM0nI4NhU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/test/e2e/containers/otlp_sender/internal/filereceiver/factory.go 
b/test/e2e/containers/otlp_sender/internal/filereceiver/factory.go deleted file mode 100644 index 29864597b910a..0000000000000 --- a/test/e2e/containers/otlp_sender/internal/filereceiver/factory.go +++ /dev/null @@ -1,157 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2021-present Datadog, Inc. - -// Package filereceiver implements a receiver that reads OTLP metrics from a given file. -package filereceiver - -import ( - "bufio" - "context" - "errors" - "fmt" - "log" - "os" - "time" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/pdata/pmetric" - collectorreceiver "go.opentelemetry.io/collector/receiver" - - "go.uber.org/zap" -) - -const typeStr = "file" - -// NewFactory creates a new OTLP receiver factory. -func NewFactory() collectorreceiver.Factory { - cfgType, _ := component.NewType(typeStr) - return collectorreceiver.NewFactory( - cfgType, - createDefaultConfig, - collectorreceiver.WithMetrics(createMetricsReceiver, component.StabilityLevelAlpha), - ) -} - -// Config of filereceiver. -type Config struct { - collectorreceiver.Settings `mapstructure:",squash"` - // Path of metrics data. - Path string `mapstructure:"path"` - // LoopConfig is the loop configuration. - Loop LoopConfig `mapstructure:"loop"` -} - -// LoopConfig is the loop configuration. -type LoopConfig struct { - // Enabled states whether the feature is enabled. - Enabled bool `mapstructure:"enabled"` - // Period defines the loop period. - Period time.Duration `mapstructure:"period"` -} - -// Validate configuration of receiver. -func (cfg *Config) Validate() error { - if cfg.Path == "" { - return errors.New("path can't be empty") - } - return nil -} - -func createDefaultConfig() component.Config { - cfgType, _ := component.NewType(typeStr) - return &Config{ - Settings: collectorreceiver.Settings{ - ID: component.NewID(cfgType), - }, - Loop: LoopConfig{Enabled: false, Period: 10 * time.Second}, - } -} - -var _ collectorreceiver.Metrics = (*receiver)(nil) - -type receiver struct { - config *Config - logger *zap.Logger - unmarshaler pmetric.Unmarshaler - nextConsumer consumer.Metrics - stopCh chan struct{} -} - -func (r *receiver) Start(_ context.Context, host component.Host) error { - if r.config.Loop.Enabled { - r.logger.Info("Running in a loop") - go r.unmarshalLoop(host) - } else { - r.logger.Info("Running just once") - go r.unmarshalAndSend(host) - } - return nil -} - -func (r *receiver) unmarshalAndSend(_ component.Host) { - file, err := os.Open(r.config.Path) - if err != nil { - log.Fatal(fmt.Errorf("failed to open %q: %w", r.config.Path, err)) - return - } - - r.logger.Info("Sending metrics batch") - scanner := bufio.NewScanner(file) - for scanner.Scan() { - metrics, err := r.unmarshaler.UnmarshalMetrics(scanner.Bytes()) - if err != nil { - log.Fatal(fmt.Errorf("failed to unmarshal %q: %w", r.config.Path, err)) - return - } - - err = r.nextConsumer.ConsumeMetrics(context.Background(), metrics) - if err != nil { - log.Fatal(fmt.Errorf("failed to send %q: %w", r.config.Path, err)) - return - } - } - - if err := scanner.Err(); err != nil { - log.Fatal(fmt.Errorf("failed to scan %q: %w", r.config.Path, err)) - return - } - - if err := file.Close(); err != nil { - log.Fatal(fmt.Errorf("failed to close %q: %w", r.config.Path, err)) - return - } -} - -func (r *receiver) 
unmarshalLoop(host component.Host) { - for { - r.unmarshalAndSend(host) - select { - case <-time.After(r.config.Loop.Period): - case <-r.stopCh: - return - } - } -} - -func (r *receiver) Shutdown(context.Context) error { - close(r.stopCh) - return nil -} - -func createMetricsReceiver( - _ context.Context, - set collectorreceiver.Settings, - cfg component.Config, - consumer consumer.Metrics, -) (collectorreceiver.Metrics, error) { - return &receiver{ - config: cfg.(*Config), - logger: set.Logger, - unmarshaler: &pmetric.JSONUnmarshaler{}, - nextConsumer: consumer, - stopCh: make(chan struct{}), - }, nil -} diff --git a/test/e2e/cws-tests/README.md b/test/e2e/cws-tests/README.md deleted file mode 100644 index 5008231122df1..0000000000000 --- a/test/e2e/cws-tests/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# CSPM e2e tests - -## Docker flavors - -To run docker flavoured tests, local only, please run: - -For CSPM: -```sh -DD_API_KEY= \ -DD_APP_KEY= \ -DD_SITE=datadoghq.com \ -DD_AGENT_IMAGE=datadog/agent-dev:master \ -python3 tests/test_e2e_cspm_docker.py -``` - -Please change `DD_AGENT_IMAGE` to a branch specific tag if you need to test a specific branch. diff --git a/test/e2e/cws-tests/requirements.txt b/test/e2e/cws-tests/requirements.txt deleted file mode 100644 index cc9857c383b50..0000000000000 --- a/test/e2e/cws-tests/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -kubernetes==30.1.0 -datadog-api-client==2.27.0 -pyaml==24.7.0 -docker==7.1.0 -retry==0.9.2 -emoji==2.12.1 -requests==2.32.3 -jsonschema==4.23.0 \ No newline at end of file diff --git a/test/e2e/cws-tests/tests/lib/common/app.py b/test/e2e/cws-tests/tests/lib/common/app.py deleted file mode 100644 index 37a8dd8b5829d..0000000000000 --- a/test/e2e/cws-tests/tests/lib/common/app.py +++ /dev/null @@ -1,31 +0,0 @@ -import time - -from datadog_api_client.v1 import ApiClient, Configuration -from datadog_api_client.v1.api.metrics_api import MetricsApi -from retry.api import retry_call - - -class App: - def __init__(self): - self.v1_api_client = ApiClient(Configuration()) - - def query_metric(self, name, **kw): - api_instance = MetricsApi(self.v1_api_client) - - tags = [] - for key, value in kw.items(): - tags.append(f"{key}:{value}") - if len(tags) == 0: - tags.append("*") - - response = api_instance.query_metrics(int(time.time()) - 30, int(time.time()), f"{name}{{{','.join(tags)}}}") - return response - - def wait_for_metric(self, name, tries=30, delay=10, **kw): - def expect_metric(): - metric = self.query_metric(name, **kw) - if len(metric.get("series")) == 0: - raise LookupError(f"no value found in {metric}") - return metric - - return retry_call(expect_metric, tries=tries, delay=delay) diff --git a/test/e2e/cws-tests/tests/lib/config.py b/test/e2e/cws-tests/tests/lib/config.py deleted file mode 100644 index 6051bee608045..0000000000000 --- a/test/e2e/cws-tests/tests/lib/config.py +++ /dev/null @@ -1,45 +0,0 @@ -import tempfile - -import yaml - - -def gen_system_probe_config(npm_enabled=False, rc_enabled=False, log_level="INFO", log_patterns=None): - fp = tempfile.NamedTemporaryFile(prefix="e2e-system-probe-", mode="w", delete=False) - - if not log_patterns: - log_patterns = [] - - data = { - "system_probe_config": {"log_level": log_level}, - "network_config": {"enabled": npm_enabled}, - "runtime_security_config": { - "log_patterns": log_patterns, - "network": {"enabled": True}, - "remote_configuration": {"enabled": rc_enabled, "refresh_interval": "5s"}, - }, - } - - yaml.dump(data, fp) - fp.close() - - return fp.name - - -def 
gen_datadog_agent_config(hostname="myhost", log_level="INFO", tags=None, rc_enabled=False, rc_key=None): - fp = tempfile.NamedTemporaryFile(prefix="e2e-datadog-agent-", mode="w", delete=False) - - if not tags: - tags = [] - - data = { - "log_level": log_level, - "hostname": hostname, - "tags": tags, - "security_agent.remote_workloadmeta": True, - "remote_configuration": {"enabled": rc_enabled, "refresh_interval": "5s", "key": rc_key}, - } - - yaml.dump(data, fp) - fp.close() - - return fp.name diff --git a/test/e2e/cws-tests/tests/lib/const.py b/test/e2e/cws-tests/tests/lib/const.py deleted file mode 100644 index 7dc3c90a1d31c..0000000000000 --- a/test/e2e/cws-tests/tests/lib/const.py +++ /dev/null @@ -1,3 +0,0 @@ -SECURITY_START_LOG = "Successfully connected to the runtime security module" -SYS_PROBE_START_LOG = "runtime security started" -SEC_AGENT_PATH = "/opt/datadog-agent/embedded/bin/security-agent" diff --git a/test/e2e/cws-tests/tests/lib/cspm/api.py b/test/e2e/cws-tests/tests/lib/cspm/api.py deleted file mode 100644 index a139c99ab9a3b..0000000000000 --- a/test/e2e/cws-tests/tests/lib/cspm/api.py +++ /dev/null @@ -1,56 +0,0 @@ -import os - -import lib.common.app as common -import requests -from retry.api import retry_call - - -def aggregate_logs(query, track): - site = os.environ["DD_SITE"] - api_key = os.environ["DD_API_KEY"] - app_key = os.environ["DD_APP_KEY"] - - url = f"https://api.{site}/api/v2/logs/analytics/aggregate?type={track}" - body = { - "compute": [{"aggregation": "count", "type": "total"}], - "filter": { - "from": "now-3m", - "to": "now", - "query": query, - }, - } - - r = requests.post( - url, - headers={"DD-API-KEY": api_key, "DD-APPLICATION-KEY": app_key}, - json=body, - ) - api_response = r.json() - if not api_response["data"] or not api_response["data"]["buckets"]: - raise LookupError(query) - - count = api_response["data"]["buckets"][0]["computes"]["c0"] - if count == 0: - raise LookupError(query) - - return api_response - - -def fetch_app_findings(query): - return aggregate_logs(query, track="cpfinding") - - -def fetch_app_compliance_event(query): - return aggregate_logs(query, track="compliance") - - -def wait_for_findings(query, tries=30, delay=5): - return retry_call(fetch_app_findings, fargs=[query], tries=tries, delay=delay) - - -def wait_for_compliance_event(query, tries=30, delay=5): - return retry_call(fetch_app_compliance_event, fargs=[query], tries=tries, delay=delay) - - -class App(common.App): - pass diff --git a/test/e2e/cws-tests/tests/lib/cspm/finding.py b/test/e2e/cws-tests/tests/lib/cspm/finding.py deleted file mode 100644 index e3a7839cc3f52..0000000000000 --- a/test/e2e/cws-tests/tests/lib/cspm/finding.py +++ /dev/null @@ -1,27 +0,0 @@ -import json - - -def extract_findings(lines): - if not lines: - return [] - - res_lines = ["["] - for line in lines: - if line == "}": - res_lines.append("},") - else: - res_lines.append(line) - res_lines.pop() - res_lines.extend(["}", "]"]) - return json.loads("".join(res_lines)) - - -def is_subset(subset, superset): - if isinstance(subset, dict): - return all(key in superset and is_subset(val, superset[key]) for key, val in subset.items()) - - if isinstance(subset, list) or isinstance(subset, set): - return all(any(is_subset(subitem, superitem) for superitem in superset) for subitem in subset) - - # assume that subset is a plain value if none of the above match - return subset == superset diff --git a/test/e2e/cws-tests/tests/lib/docker.py b/test/e2e/cws-tests/tests/lib/docker.py deleted file mode 100644 
index 40047900f968e..0000000000000 --- a/test/e2e/cws-tests/tests/lib/docker.py +++ /dev/null @@ -1,154 +0,0 @@ -import os -import tarfile -import tempfile - -import docker -from retry.api import retry_call - -from lib.const import SEC_AGENT_PATH -from lib.log import LogGetter - - -def is_container_running(container): - container.reload() - if container.status != "running": - raise Exception - - -class DockerHelper(LogGetter): - def __init__(self): - self.client = docker.from_env() - - self.agent_container = None - - def start_cspm_agent(self, image, datadog_agent_config=None): - volumes = [ - "/var/run/docker.sock:/var/run/docker.sock:ro", - "/proc/:/host/proc/:ro", - "/sys/fs/cgroup/:/host/sys/fs/cgroup:ro", - "/etc/passwd:/etc/passwd:ro", - "/etc/os-release:/host/etc/os-release:ro", - "/:/host/root:ro", - ] - - if datadog_agent_config: - volumes.append(f"{datadog_agent_config}:/etc/datadog-agent/datadog.yaml") - - site = os.environ["DD_SITE"] - api_key = os.environ["DD_API_KEY"] - - self.agent_container = self.client.containers.run( - image, - environment=[ - "DD_COMPLIANCE_CONFIG_ENABLED=true", - "HOST_ROOT=/host/root", - f"DD_SITE={site}", - f"DD_API_KEY={api_key}", - ], - volumes=volumes, - detach=True, - ) - - return self.agent_container - - def start_cws_agent(self, image, datadog_agent_config=None, system_probe_config=None): - volumes = [ - "/var/run/docker.sock:/var/run/docker.sock:ro", - "/proc/:/host/proc/:ro", - "/sys/fs/cgroup/:/host/sys/fs/cgroup:ro", - "/etc/passwd:/etc/passwd:ro", - "/etc/group:/etc/group:ro", - "/:/host/root:ro", - "/sys/kernel/debug:/sys/kernel/debug", - "/etc/os-release:/etc/os-release", - ] - - if datadog_agent_config: - volumes.append(f"{datadog_agent_config}:/etc/datadog-agent/datadog.yaml") - - if system_probe_config: - volumes.append(f"{system_probe_config}:/etc/datadog-agent/system-probe.yaml") - - site = os.environ["DD_SITE"] - api_key = os.environ["DD_API_KEY"] - - self.agent_container = self.client.containers.run( - image, - cap_add=["SYS_ADMIN", "SYS_RESOURCE", "SYS_PTRACE", "NET_ADMIN", "IPC_LOCK"], - security_opt=["apparmor:unconfined"], - environment=[ - "DD_RUNTIME_SECURITY_CONFIG_ENABLED=true", - "DD_SYSTEM_PROBE_ENABLED=true", - "HOST_ROOT=/host/root", - f"DD_SITE={site}", - f"DD_API_KEY={api_key}", - ], - volumes=volumes, - detach=True, - ) - - return self.agent_container - - def download_policies(self): - command = SEC_AGENT_PATH + " runtime policy download" - site = os.environ["DD_SITE"] - api_key = os.environ["DD_API_KEY"] - app_key = os.environ["DD_APP_KEY"] - return self.agent_container.exec_run( - command, - stderr=False, - stdout=True, - stream=False, - environment=[ - f"DD_SITE={site}", - f"DD_API_KEY={api_key}", - f"DD_APP_KEY={app_key}", - ], - ) - - def push_policies(self, policies): - temppolicy = tempfile.NamedTemporaryFile(prefix="e2e-policy-", mode="w", delete=False) - temppolicy.write(policies) - temppolicy.close() - temppolicy_path = temppolicy.name - self.cp_file(temppolicy_path, "/etc/datadog-agent/runtime-security.d/default.policy") - os.remove(temppolicy_path) - - def cp_file(self, src, dst): - tar = tarfile.open(src + '.tar', mode='w') - try: - tar.add(src) - finally: - tar.close() - data = open(src + '.tar', 'rb').read() - self.agent_container.put_archive("/tmp", data) - self.agent_container.exec_run("mv /tmp/" + src + " " + dst) - - def reload_policies(self): - self.agent_container.exec_run(SEC_AGENT_PATH + " runtime policy reload") - - def wait_agent_container(self, tries=10, delay=5): - return 
retry_call(is_container_running, fargs=[self.agent_container], tries=tries, delay=delay) - - def get_log(self, agent_name): - log_prefix = None - if agent_name == "security-agent": - log_prefix = "SECURITY" - elif agent_name == "system-probe": - log_prefix = "SYS-PROBE" - else: - raise LookupError(agent_name) - - log = self.agent_container.logs(since=1).decode("utf-8") - - result = [line for line in log.splitlines() if log_prefix in line] - if result: - return result - raise LookupError(agent_name) - - def close(self): - if self.agent_container: - self.agent_container.stop() - self.agent_container.remove() - - self.client.close() diff --git a/test/e2e/cws-tests/tests/lib/kubernetes.py b/test/e2e/cws-tests/tests/lib/kubernetes.py deleted file mode 100644 index 6d5e4267150e2..0000000000000 --- a/test/e2e/cws-tests/tests/lib/kubernetes.py +++ /dev/null @@ -1,123 +0,0 @@ -import os -import tarfile -import tempfile - -from kubernetes import client, config -from kubernetes.stream import stream - -from lib.const import SEC_AGENT_PATH -from lib.log import LogGetter - - -class KubernetesHelper(LogGetter): - def __init__(self, namespace, in_cluster=False): - if in_cluster: - config.load_incluster_config() - else: - config.load_kube_config() - - self.api_client = client.CoreV1Api() - - self.namespace = namespace - self.pod_name = None - - def select_pod_name(self, label_selector): - resp = self.api_client.list_namespaced_pod(namespace=self.namespace, label_selector=label_selector) - for i in resp.items: - self.pod_name = i.metadata.name - return - raise LookupError(label_selector) - - def get_log(self, agent_name): - log = self.api_client.read_namespaced_pod_log( - name=self.pod_name, namespace=self.namespace, container=agent_name, follow=False, tail_lines=10000 - ) - - return log.splitlines() - - def exec_command(self, container, command=None): - if not command: - command = [] - - return stream( - self.api_client.connect_post_namespaced_pod_exec, - name=self.pod_name, - namespace=self.namespace, - container=container, - command=command, - stderr=False, - stdin=False, - stdout=True, - tty=False, - ) - - def reload_policies(self): - command = [SEC_AGENT_PATH, 'runtime', 'policy', 'reload'] - self.exec_command("security-agent", command=command) - - def download_policies(self): - site = os.environ["DD_SITE"] - api_key = os.environ["DD_API_KEY"] - app_key = os.environ["DD_APP_KEY"] - command = [ - "/bin/bash", - "-c", - "export DD_SITE=" - + site - + " ; export DD_API_KEY=" - + api_key - + " ; export DD_APP_KEY=" - + app_key - + " ; " - + SEC_AGENT_PATH - + " runtime policy download", - ] - return self.exec_command("security-agent", command=command) - - def push_policies(self, policies): - temppolicy = tempfile.NamedTemporaryFile(prefix="e2e-policy-", mode="w", delete=False) - temppolicy.write(policies) - temppolicy.close() - temppolicy_path = temppolicy.name - self.exec_command("security-agent", command=["mkdir", "-p", "/tmp/runtime-security.d"]) - self.cp_to_agent("security-agent", temppolicy_path, "/tmp/runtime-security.d/downloaded.policy") - os.remove(temppolicy_path) - - def cp_to_agent(self, agent_name, src_file, dst_file): - command = ['tar', 'xvf', '-', '-C', '/tmp'] - resp = stream( - self.api_client.connect_post_namespaced_pod_exec, - name=self.pod_name, - namespace=self.namespace, - container=agent_name, - command=command, - stderr=True, - stdin=True, - stdout=True, - tty=False, - _preload_content=False, - ) - - with tempfile.TemporaryFile() as tar_buffer: - with 
tarfile.open(fileobj=tar_buffer, mode='w') as tar: - tar.add(src_file) - - tar_buffer.seek(0) - commands = [] - commands.append(tar_buffer.read()) - - while resp.is_open(): - resp.update(timeout=1) - if commands: - c = commands.pop(0) - resp.write_stdin(c) - else: - break - resp.close() - - dirname = os.path.dirname(dst_file) - command = ['mkdir', '-p', dirname] - self.exec_command(agent_name, command=command) - - command = ['mv', f'/tmp/{src_file}', dst_file] - self.exec_command(agent_name, command=command) diff --git a/test/e2e/cws-tests/tests/lib/log.py b/test/e2e/cws-tests/tests/lib/log.py deleted file mode 100644 index 5bbf5c1d3349e..0000000000000 --- a/test/e2e/cws-tests/tests/lib/log.py +++ /dev/null @@ -1,21 +0,0 @@ -from abc import ABC, abstractmethod - -from retry.api import retry_call - - -class LogGetter(ABC): - @abstractmethod - def get_log(self, _agent_name): - raise NotImplementedError() - - -def _wait_agent_log(agent_name, log_getter, pattern): - lines = log_getter.get_log(agent_name) - for line in lines: - if pattern in line: - return - raise LookupError(f"{agent_name} | {pattern}") - - -def wait_agent_log(agent_name, log_getter, pattern, tries=10, delay=5): - return retry_call(_wait_agent_log, fargs=[agent_name, log_getter, pattern], tries=tries, delay=delay) diff --git a/test/e2e/cws-tests/tests/lib/stepper.py b/test/e2e/cws-tests/tests/lib/stepper.py deleted file mode 100644 index fbfec4312c060..0000000000000 --- a/test/e2e/cws-tests/tests/lib/stepper.py +++ /dev/null @@ -1,18 +0,0 @@ -import emoji - - -class Step: - def __init__(self, msg="", emoji=""): - self.msg = msg - self.emoji = emoji - - def __enter__(self): - _emoji = emoji.emojize(self.emoji) - print(f"{_emoji} {self.msg}... ", end="", flush=True) - return self - - def __exit__(self, exc_type, _exc_val, _exc_tb): - if exc_type is None: - print(emoji.emojize(":check_mark:"), flush=True) - else: - print(emoji.emojize(":cross_mark:"), flush=True) diff --git a/test/e2e/cws-tests/tests/test_e2e_cspm.py b/test/e2e/cws-tests/tests/test_e2e_cspm.py deleted file mode 100644 index 57b0ac3f3ae44..0000000000000 --- a/test/e2e/cws-tests/tests/test_e2e_cspm.py +++ /dev/null @@ -1,34 +0,0 @@ -from lib.cspm.finding import is_subset - - -def expect_findings(test_case, findings, expected_findings): - findings_by_rule = {} - for agent_rule_id, rule_findings in findings.items(): - findings_by_rule.setdefault(agent_rule_id, []).extend(rule_findings) - for finding in rule_findings: - print(f"finding {agent_rule_id} {finding}") - - for rule_id, expected_rule_findings in expected_findings.items(): - for expected_rule_finding in expected_rule_findings: - test_case.assertIn(rule_id, findings_by_rule) - found = False - rule_findings = findings_by_rule.get(rule_id, []) - for finding in rule_findings: - if is_subset(expected_rule_finding, finding): - found = True - break - - test_case.assertTrue(found, f"unexpected finding {finding} for rule {rule_id}") - del findings_by_rule[rule_id] - - for rule_id, rule_findings in findings_by_rule.items(): - for finding in rule_findings: - result = finding["result"] - print(f"finding {rule_id} {result}") - - for rule_id, rule_findings in findings_by_rule.items(): - for finding in rule_findings: - result = finding["result"] - test_case.assertNotIn( - result, ("failed", "error"), f"finding for rule {rule_id} not expected to be in failed or error state" - ) diff --git a/test/e2e/cws-tests/tests/test_e2e_cspm_docker.py b/test/e2e/cws-tests/tests/test_e2e_cspm_docker.py deleted file mode 100644 index 
bbc365807542c..0000000000000 --- a/test/e2e/cws-tests/tests/test_e2e_cspm_docker.py +++ /dev/null @@ -1,150 +0,0 @@ -import json -import os -import socket -import time -import unittest -import uuid -import warnings - -from lib.config import gen_datadog_agent_config -from lib.cspm.api import App -from lib.docker import DockerHelper -from lib.stepper import Step -from test_e2e_cspm import expect_findings - - -class TestE2EDocker(unittest.TestCase): - def setUp(self): - warnings.simplefilter("ignore", category=ResourceWarning) - warnings.simplefilter("ignore", category=UserWarning) - - self.docker_helper = DockerHelper() - self.app = App() - - def tearDown(self): - self.docker_helper.close() - - def test_privileged_container(self): - print("") - - test_id = str(uuid.uuid4())[:4] - with Step(msg="create privileged container", emoji=":construction:"): - pc = self.docker_helper.client.containers.run( - "ubuntu:latest", - command="sleep 7200", - detach=True, - remove=True, - privileged=True, - ) - self.container_id = pc.id - - with Step(msg="check agent start", emoji=":man_running:"): - image = os.getenv("DD_AGENT_IMAGE") - hostname = f"host_{test_id}" - self.datadog_agent_config = gen_datadog_agent_config( - hostname=hostname, log_level="DEBUG", tags=["tag1", "tag2"] - ) - - self.container = self.docker_helper.start_cspm_agent( - image, - datadog_agent_config=self.datadog_agent_config, - ) - self.assertIsNotNone(self.container, msg="unable to start container") - - self.docker_helper.wait_agent_container() - - with Step(msg="check agent events", emoji=":check_mark_button:"): - self.container.exec_run("security-agent compliance check --dump-reports /tmp/reports.json --report") - _, output = self.container.exec_run("cat /tmp/reports.json") - print(output) - findings = json.loads(output) - - expected_findings = { - "cis-docker-1.2.0-5.4": [ - { - "agent_rule_id": "cis-docker-1.2.0-5.4", - "agent_framework_id": "cis-docker", - "result": "failed", - "resource_type": "docker_container", - "data": { - "container.id": self.container_id, - }, - } - ], - "cis-docker-1.2.0-1.2.1": [{"result": "failed"}], - "cis-docker-1.2.0-1.2.3": [{"result": "error"}], - "cis-docker-1.2.0-1.2.4": [{"result": "error"}], - "cis-docker-1.2.0-1.2.5": [{"result": "error"}], - "cis-docker-1.2.0-1.2.6": [{"result": "error"}], - "cis-docker-1.2.0-1.2.7": [{"result": "error"}], - "cis-docker-1.2.0-1.2.8": [{"result": "error"}], - "cis-docker-1.2.0-1.2.9": [{"result": "error"}], - "cis-docker-1.2.0-1.2.10": [{"result": "error"}], - "cis-docker-1.2.0-1.2.11": [{"result": "error"}], - "cis-docker-1.2.0-1.2.12": [{"result": "error"}], - "cis-docker-1.2.0-2.2": [{"result": "failed"}], - "cis-docker-1.2.0-2.3": [{"result": "failed"}], - "cis-docker-1.2.0-2.4": [{"result": "failed"}], - "cis-docker-1.2.0-2.6": [{"result": "failed"}], - "cis-docker-1.2.0-3.10": [{"result": "error"}], - "cis-docker-1.2.0-3.11": [{"result": "error"}], - "cis-docker-1.2.0-3.12": [{"result": "error"}], - "cis-docker-1.2.0-3.13": [{"result": "error"}], - "cis-docker-1.2.0-3.14": [{"result": "error"}], - "cis-docker-1.2.0-3.15": [{"result": "error"}], - "cis-docker-1.2.0-3.16": [{"result": "error"}], - "cis-docker-1.2.0-3.17": [{"result": "error"}], - "cis-docker-1.2.0-3.18": [{"result": "error"}], - "cis-docker-1.2.0-3.19": [{"result": "error"}], - "cis-docker-1.2.0-3.20": [{"result": "error"}], - "cis-docker-1.2.0-3.21": [{"result": "error"}], - "cis-docker-1.2.0-3.22": [{"result": "error"}], - "cis-docker-1.2.0-3.7": [{"result": "error"}], - 
"cis-docker-1.2.0-3.8": [{"result": "error"}], - "cis-docker-1.2.0-3.9": [{"result": "error"}], - "cis-docker-1.2.0-4.1": [{"result": "failed"}], - "cis-docker-1.2.0-4.6": [{"result": "failed"}], - "cis-docker-1.2.0-5.1": [{"result": "failed"}], - "cis-docker-1.2.0-5.10": [{"result": "failed"}], - "cis-docker-1.2.0-5.11": [{"result": "failed"}], - "cis-docker-1.2.0-5.12": [{"result": "failed"}], - "cis-docker-1.2.0-5.14": [{"result": "failed"}], - "cis-docker-1.2.0-5.2": [{"result": "error"}], - "cis-docker-1.2.0-5.25": [{"result": "failed"}], - "cis-docker-1.2.0-5.26": [{"result": "failed"}], - "cis-docker-1.2.0-5.28": [{"result": "failed"}], - "cis-docker-1.2.0-5.31": [{"result": "failed"}], - "cis-docker-1.2.0-5.7": [{"result": "failed"}], - } - - expect_findings(self, findings, expected_findings) - - with Step(msg="wait for intake (~1m)", emoji=":alarm_clock:"): - time.sleep(1 * 60) - - with Step(msg="wait for datadog.security_agent.compliance.running metric", emoji="\N{BEER MUG}"): # fmt: off - self.app.wait_for_metric("datadog.security_agent.compliance.running", host=socket.gethostname()) - - ## Disabled while no CSPM API is available - # with Step(msg="check app compliance event", emoji=":SOON_arrow:"): - # wait_for_compliance_event(f"resource_id:*{self.container_id}") - - with Step(msg="wait for finding generation (~1m)", emoji=":alarm_clock:"): - time.sleep(1 * 60) - - with Step(msg="wait for datadog.security_agent.compliance.containers_running metric", emoji="\N{BEER MUG}"): # fmt: off - self.app.wait_for_metric( - "datadog.security_agent.compliance.containers_running", container_id=self.container_id - ) - - ## Disabled while no CSPM API is available - # with Step(msg="check app finding", emoji=":chart_increasing_with_yen:"): - # wait_for_findings(f"@resource_type:docker_container @container_id:{self.container_id}") - - -def main(): - unittest.main() - - -if __name__ == "__main__": - main() diff --git a/test/e2e/cws-tests/tests/test_e2e_cspm_kubernetes.py b/test/e2e/cws-tests/tests/test_e2e_cspm_kubernetes.py deleted file mode 100644 index ef0871c049f30..0000000000000 --- a/test/e2e/cws-tests/tests/test_e2e_cspm_kubernetes.py +++ /dev/null @@ -1,224 +0,0 @@ -import argparse -import sys -import time -import unittest -import warnings - -import emoji -from lib.cspm.api import App -from lib.kubernetes import KubernetesHelper -from lib.stepper import Step -from test_e2e_cspm import expect_findings - - -class TestE2EKubernetes(unittest.TestCase): - namespace = "default" - in_cluster = False - expectedFindingsMasterEtcdNode = { - "cis-kubernetes-1.5.1-1.1.12": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.16": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.19": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.21": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.22": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.23": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.24": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.25": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.26": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.33": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.6": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.3.2": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.3.3": [ - { - "result": "passed", - } - ], - "cis-kubernetes-1.5.1-1.3.4": [ - { - "result": "passed", - } - ], - 
"cis-kubernetes-1.5.1-1.3.5": [ - { - "result": "passed", - } - ], - "cis-kubernetes-1.5.1-1.3.6": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.3.7": [ - { - "result": "passed", - } - ], - "cis-kubernetes-1.5.1-1.4.1": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-3.2.1": [ - { - "result": "failed", - } - ], - } - expectedFindingsWorkerNode = { - "cis-kubernetes-1.5.1-4.2.1": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-4.2.3": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-4.2.4": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-4.2.5": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-4.2.6": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-4.2.10": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-4.2.12": [ - { - "result": "failed", - } - ], - } - hostname = "k8s-e2e-tests-control-plane" - - def setUp(self): - warnings.simplefilter("ignore", category=ResourceWarning) - warnings.simplefilter("ignore", category=UserWarning) - warnings.simplefilter("ignore", category=DeprecationWarning) - - self.kubernetes_helper = KubernetesHelper(namespace=self.namespace, in_cluster=self.in_cluster) - self.resource_id = "k8s-e2e-tests-control-plane_kubernetes_*_node" - self.app = App() - - def test_k8s(self): - print("") - - agent_name = "security-agent" - - with Step(msg="select pod", emoji=":man_running:"): - self.kubernetes_helper.select_pod_name("app.kubernetes.io/component=agent") - - with Step(msg="check agent events", emoji=":check_mark_button:"): - self.kubernetes_helper.exec_command( - agent_name, ["security-agent", "compliance", "check", "--dump-reports", "/tmp/reports", "--report"] - ) - output = self.kubernetes_helper.exec_command(agent_name, ["bash", "-c", "cat /tmp/reports"]) - print(output) - # if the output is JSON, it automatically calls json.loads on it. Yeah, I know... 
I've felt the same too - findings = eval(output) - expected_findings = dict( - **TestE2EKubernetes.expectedFindingsMasterEtcdNode, **TestE2EKubernetes.expectedFindingsWorkerNode - ) - expect_findings(self, findings, expected_findings) - - with Step(msg="wait for intake (~1m)", emoji=":alarm_clock:"): - time.sleep(1 * 60) - - with Step(msg="wait for datadog.security_agent.compliance.running metric", emoji="\N{beer mug}"): # fmt: off - self.app.wait_for_metric("datadog.security_agent.compliance.running", host=TestE2EKubernetes.hostname) - - ## Disabled while no CSPM API is available - # with Step(msg="check app compliance event", emoji=":SOON_arrow:"): - # wait_for_compliance_event(f"resource_id:{self.resource_id}") - - with Step(msg="wait for finding generation (~1m)", emoji=":alarm_clock:"): - time.sleep(1 * 60) - - with Step(msg="wait for datadog.security_agent.compliance.containers_running metric", emoji="\N{beer mug}"): # fmt: off - self.app.wait_for_metric( - "datadog.security_agent.compliance.containers_running", host=TestE2EKubernetes.hostname - ) - - ## Disabled while no CSPM API is available - # with Step(msg="check app findings", emoji=":chart_increasing_with_yen:"): - # wait_for_findings(f"@resource_type:kubernetes_*_node @resource:{self.resource_id}") - - print(emoji.emojize(":heart_on_fire:"), flush=True) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--namespace", default="default") - parser.add_argument("--in-cluster", action="store_true") - parser.add_argument("unittest_args", nargs="*") - args = parser.parse_args() - - # setup some specific tests - TestE2EKubernetes.namespace = args.namespace - TestE2EKubernetes.in_cluster = args.in_cluster - - unit_argv = [sys.argv[0]] + args.unittest_args - unittest.main(argv=unit_argv) - - -if __name__ == "__main__": - main() diff --git a/test/e2e/docs/run-instance.svg b/test/e2e/docs/run-instance.svg deleted file mode 100644 index 082cdb69d0b99..0000000000000 --- a/test/e2e/docs/run-instance.svg +++ /dev/null @@ -1,220 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
[deleted run-instance.svg: rendered flowchart of the run-instance sequence: Run Instance -> kind create cluster -> kind cluster ready -> argo download -> argo setup -> argo submit -> (wait completion) -> argo get results -> exit with code; the mermaid source for this diagram is in the deleted sequence.md below]
\ No newline at end of file diff --git a/test/e2e/docs/sequence.md b/test/e2e/docs/sequence.md deleted file mode 100644 index 7cbab0d4037e4..0000000000000 --- a/test/e2e/docs/sequence.md +++ /dev/null @@ -1,41 +0,0 @@ -# Generate sequence - -## Update process - -1. Copy paste the content of each sequence in the [online tool](https://github.com/mermaidjs/mermaid-live-editor). -2. Download the image generated -3. move it to replace the old one - -### Online data - -[setup-instance](../scripts/setup-instance): - -```text -graph TD -A{setup-instance} -->B(AWS specification) -B --> C[ignition] -C --> D(sshAuthorizedKeys) -D -->B -B --> E[ec2] -E --> F(request-spot-instances) -F --> G(describe-spot-instance-requests) -G -->|Instance created| H(create-tags) -H -->|instance and spot requests| I(describe-instances) -I -->|Get PrivateIpAddress| J(cancel-spot-instance-requests) -J --> K[ssh] -K --> L(git clone and checkout) -L --> M{run-instance} -``` - - -[run-instance](../scripts/run-instance) -```text -graph TD -A{Run Instance} -->B[kind create cluster] -B --> C[kind cluster ready] -C --> D[argo download] -D --> E[argo setup] -E --> F[argo submit] -F -->|wait completion| G[argo get results] -G --> H{exit with code} -``` diff --git a/test/e2e/docs/setup-instance.svg b/test/e2e/docs/setup-instance.svg deleted file mode 100644 index cc69bf8b9d108..0000000000000 --- a/test/e2e/docs/setup-instance.svg +++ /dev/null @@ -1,350 +0,0 @@ -
[deleted setup-instance.svg: rendered flowchart of the setup-instance sequence: setup-instance -> AWS specification -> ignition -> sshAuthorizedKeys -> ec2 -> request-spot-instances -> describe-spot-instance-requests -> (Instance created) -> create-tags -> (instance and spot requests) -> describe-instances -> (Get PrivateIpAddress) -> cancel-spot-instance-requests -> ssh -> git clone and checkout -> run-instance; the mermaid source for this diagram is in the deleted sequence.md above]
\ No newline at end of file diff --git a/test/e2e/scripts/generate-parameters.sh b/test/e2e/scripts/generate-parameters.sh deleted file mode 100755 index 5dffa47feabbf..0000000000000 --- a/test/e2e/scripts/generate-parameters.sh +++ /dev/null @@ -1,110 +0,0 @@ -#!/bin/bash - -##### A script to generate a unique namespace ##### -##### and a parameters file for a workflow ##### - - -##### Exit on error ##### -set -e - -##### Source utility functions ##### -source utils.sh - -##### Functions ##### - -usage() -{ - echo 'Usage: ./generate-parameters.sh [[-w workflow -g workflow_group] | [-h]] -Example: ./generate-parameters.sh -g workflow_group -w workflow -Flags: --w, --workflow workflow name --g, --workflow-group workflow group name --o, --output-file generated yaml file name (default parameters.yaml) --d, --workflows-dir the directory where workflows are defined (default ../argo-workflows)' -} - -validate_input() -{ - # Validate workflow name characters - if ! [[ $WORKFLOW =~ ^[0-9a-zA-Z-]+$ ]]; then - echo "Error: Invalid workflow name format: $WORKFLOW" - exit 1 - fi - - # Validate workflow group name characters - if ! [[ $WORKFLOW_GROUP =~ ^[0-9a-zA-Z._-]+$ ]]; then - echo "Error: Invalid workflow group name format: $WORKFLOW_GROUP" - exit 1 - fi -} - -# Usage: generate_parameters -generate_parameters() -{ - # Merging parameters - echo 'Info: Merging parameters...' - YK_MERGE_COMMAND='yq merge --overwrite --allow-empty' - DEFAULT_GLOBAL_PARAM="$WORKFLOWS_DIR/defaults/parameters.yaml" - DEFAULT_GROUP_PARAM="$WORKFLOWS_DIR/$WORKFLOW_GROUP/defaults/parameters.yaml" - WORKFLOW_PARAM="$WORKFLOWS_DIR/$WORKFLOW_GROUP/$WORKFLOW/parameters.yaml" - TMP_YAML_PATH="$1.tmp.yaml" - $YK_MERGE_COMMAND "$DEFAULT_GLOBAL_PARAM" "$DEFAULT_GROUP_PARAM" "$WORKFLOW_PARAM" > "$TMP_YAML_PATH" - - # Rendering namespace - echo 'Info: Parameters merged, rendering namespace and saving file...' 
- NAMESPACE_TEMPLATE_VAR="{{ namespace }}" - sed -e "s/$NAMESPACE_TEMPLATE_VAR/$1/g" "$TMP_YAML_PATH" > "$OUTPUT_YAML_FILE" - echo "Info: Generated parameters, yaml file saved: $OUTPUT_YAML_FILE" - - # Cleanup temp file - rm "$TMP_YAML_PATH" -} - - -##### Main ##### - -WORKFLOW="" -WORKFLOW_GROUP="" -NAMESPACE="" -OUTPUT_YAML_FILE="parameters.yaml" -WORKFLOWS_DIR="../argo-workflows" - -if [ "$1" == "" ]; then - usage - exit 1 -fi - -while [ "$1" != "" ]; do - case $1 in - -w | --workflow ) shift - WORKFLOW=$1 - ;; - -g | --workflow-group ) shift - WORKFLOW_GROUP=$1 - ;; - -o | --output-file ) shift - OUTPUT_YAML_FILE=$1 - ;; - -d | --workflows-dir ) shift - WORKFLOWS_DIR=$1 - ;; - -h | --help ) usage - exit - ;; - * ) usage - exit 1 - esac - shift -done - -# Only proceed when `yq` is installed -check_yq_installed - -# Validate the parameters -validate_input - -# Generate namespace -generate_namespace "$WORKFLOW_GROUP" "$WORKFLOW" - -# Generate the parameters file -generate_parameters "$NAMESPACE" diff --git a/test/e2e/scripts/run-instance/.gitignore b/test/e2e/scripts/run-instance/.gitignore deleted file mode 100644 index 9e2dbce48e383..0000000000000 --- a/test/e2e/scripts/run-instance/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -argo -argo.gz diff --git a/test/e2e/scripts/run-instance/10-setup-kind.sh b/test/e2e/scripts/run-instance/10-setup-kind.sh deleted file mode 100755 index bcb879510b035..0000000000000 --- a/test/e2e/scripts/run-instance/10-setup-kind.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -arch="" -case $(uname -m) in - x86_64) arch="amd64" ;; - aarch64) arch="arm64" ;; - *) - echo "Unsupported architecture" - exit 1 - ;; -esac - -download_and_install_kubectl() { - curl --retry 5 --fail --retry-all-errors -LO "https://dl.k8s.io/release/$(curl --retry 5 --fail --retry-all-errors -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/$arch/kubectl" - sudo install kubectl /usr/local/bin/kubectl -} - -printf '=%.0s' {0..79} ; echo - -if [[ $(uname) == "Darwin" ]] -then - echo "Kind setup should not be run on Darwin" - exit 1 -fi - - -# if kubctl is not here, download it -if [[ ! -f ./kubectl ]]; then - download_and_install_kubectl -else - # else, download the SHA256 of the wanted version - curl --retry 5 --fail --retry-all-errors -LO "https://dl.k8s.io/release/$(curl --retry 5 --fail --retry-all-errors -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/$arch/kubectl.sha256" - # And if it differs, force the download again - if ! 
echo "$(/dev/null 2>&1; then - # skip the usermod step if needless - sudo usermod -a -G docker core -fi - -echo "Kind setup finished" diff --git a/test/e2e/scripts/run-instance/11-setup-kind-cluster.sh b/test/e2e/scripts/run-instance/11-setup-kind-cluster.sh deleted file mode 100755 index 812a37c58943b..0000000000000 --- a/test/e2e/scripts/run-instance/11-setup-kind-cluster.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -printf '=%.0s' {0..79} ; echo - -is_cluster_running=$(kind get clusters|grep k8s-e2e-tests||echo none) -if [ "$is_cluster_running" == "k8s-e2e-tests" ]; then - echo "Cleanup: deleting cluster k8s-e2e-tests" - kind delete cluster --name k8s-e2e-tests -fi - -echo "Setup kind cluster: k8s-e2e-tests" -SCRIPT_DIR=$(dirname "$(readlink -f "$0")") -kind create cluster --name k8s-e2e-tests --wait 10m --config "$SCRIPT_DIR/kind-cluster.yaml" diff --git a/test/e2e/scripts/run-instance/20-argo-download.sh b/test/e2e/scripts/run-instance/20-argo-download.sh deleted file mode 100755 index a29702de2fd27..0000000000000 --- a/test/e2e/scripts/run-instance/20-argo-download.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -set -euo pipefail - -printf '=%.0s' {0..79} ; echo - -cd "$(dirname "$0")" - -set -e - -arch="" -case $(uname -m) in - x86_64) arch="amd64" ;; - aarch64) arch="arm64" ;; - *) - echo "Unsupported architecture" - exit 1 - ;; -esac - -# if argo is not here, or if the SHA doesnt match, (re)download it -if [[ ! -f ./argo.gz ]] || ! sha256sum -c "argo.$arch.sha256sum" ; then - curl -Lf "https://github.com/argoproj/argo-workflows/releases/download/v3.4.3/argo-linux-$arch.gz" -o argo.gz - # before gunziping it, check its SHA - if ! sha256sum -c "argo.$arch.sha256sum"; then - echo "SHA256 of argo.gz differs, exiting." - exit 1 - fi -fi -if [[ ! -f ./argo. 
]]; then - gunzip -kf argo.gz -fi -chmod +x ./argo -./argo version diff --git a/test/e2e/scripts/run-instance/21-argo-setup.sh b/test/e2e/scripts/run-instance/21-argo-setup.sh deleted file mode 100755 index 1dae3970954a4..0000000000000 --- a/test/e2e/scripts/run-instance/21-argo-setup.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -set -euo pipefail - -printf '=%.0s' {0..79} ; echo - -cd "$(dirname "$0")" - -for i in {0..60} -do - kubectl get hpa,svc,ep,ds,deploy,job,po --all-namespaces -o wide && break - sleep 5 -done - -set -e - -kubectl create namespace argo -kubectl apply -n argo -f https://github.com/argoproj/argo-workflows/releases/download/v3.4.3/install.yaml - -# TODO use a more restrictive SA -kubectl apply -f - << EOF -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: argo-admin -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: -- kind: ServiceAccount - name: default - namespace: default -EOF - -set +e - -for i in {0..60} -do - ./argo list && exit 0 - kubectl get hpa,svc,ep,ds,deploy,job,po --all-namespaces -o wide - sleep 5 -done - -exit 1 diff --git a/test/e2e/scripts/run-instance/22-argo-submit.sh b/test/e2e/scripts/run-instance/22-argo-submit.sh deleted file mode 100755 index 9c59119372ee5..0000000000000 --- a/test/e2e/scripts/run-instance/22-argo-submit.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash -set -euo pipefail - -printf '=%.0s' {0..79} ; echo - -# ${DATADOG_AGENT_IMAGE} and ${DATADOG_CLUSTER_AGENT_IMAGE} are provided by the CI -if [[ -z ${DATADOG_AGENT_IMAGE:+x} ]] || [[ -z ${DATADOG_CLUSTER_AGENT_IMAGE:+x} ]]; then - echo "DATADOG_AGENT_IMAGE and DATADOG_CLUSTER_AGENT_IMAGE environment variables need to be set" >&2 - exit 2 -fi - -ARGO_WORKFLOW=${ARGO_WORKFLOW:-''} - -echo "DATADOG_AGENT_IMAGE=${DATADOG_AGENT_IMAGE}" -echo "DATADOG_CLUSTER_AGENT_IMAGE=${DATADOG_CLUSTER_AGENT_IMAGE}" -echo "ARGO_WORKFLOW=${ARGO_WORKFLOW}" - -cd "$(dirname "$0")" - -if [[ -n ${DOCKER_REGISTRY_URL+x} ]] && [[ -n ${DOCKER_REGISTRY_LOGIN+x} ]] && [[ -n ${DOCKER_REGISTRY_PWD+x} ]]; then - oldstate=$(shopt -po xtrace ||:); set +x # Do not log credentials - kubectl create secret docker-registry docker-registry --docker-server="$DOCKER_REGISTRY_URL" --docker-username="$DOCKER_REGISTRY_LOGIN" --docker-password="$DOCKER_REGISTRY_PWD" - eval "$oldstate" -fi - -argo_submit_cws_cspm() { - DATADOG_AGENT_SITE=${DATADOG_AGENT_SITE:-""} - - oldstate=$(shopt -po xtrace ||:); set +x # Do not log credentials - - if [[ -z ${DATADOG_AGENT_API_KEY:+x} ]] || [[ -z ${DATADOG_AGENT_APP_KEY:+x} ]]; then - echo "DATADOG_AGENT_API_KEY, DATADOG_AGENT_APP_KEY environment variables need to be set" >&2 - exit 2 - fi - - kubectl create secret generic dd-keys \ - --from-literal=DD_API_KEY="${DATADOG_AGENT_API_KEY}" \ - --from-literal=DD_APP_KEY="${DATADOG_AGENT_APP_KEY}" \ - --from-literal=DD_DDDEV_API_KEY="${DD_API_KEY}" - - eval "$oldstate" - - ./argo template create ../../argo-workflows/templates/*.yaml - ./argo submit ../../argo-workflows/$1 --wait \ - --parameter datadog-agent-image-repository="${DATADOG_AGENT_IMAGE%:*}" \ - --parameter datadog-agent-image-tag="${DATADOG_AGENT_IMAGE#*:}" \ - --parameter datadog-cluster-agent-image-repository="${DATADOG_CLUSTER_AGENT_IMAGE%:*}" \ - --parameter datadog-cluster-agent-image-tag="${DATADOG_CLUSTER_AGENT_IMAGE#*:}" \ - --parameter datadog-agent-site="${DATADOG_AGENT_SITE#*:}" \ - --parameter ci_commit_short_sha="${CI_COMMIT_SHORT_SHA:-unknown}" \ - --parameter 
ci_pipeline_id="${CI_PIPELINE_ID:-unknown}" \ - --parameter ci_job_id="${CI_JOB_ID:-unknown}" || : -} - -case "$ARGO_WORKFLOW" in - "cspm") - argo_submit_cws_cspm cspm-workflow.yaml - ;; - *) - kubectl create secret generic dd-keys \ - --from-literal=DD_API_KEY=123er \ - --from-literal=DD_APP_KEY=123er1 \ - --from-literal=DD_DDDEV_API_KEY="${DD_API_KEY}" - - ./argo template create ../../argo-workflows/templates/*.yaml - ./argo submit "../../argo-workflows/${ARGO_WORKFLOW}-workflow.yaml" --wait \ - --parameter datadog-agent-image-repository="${DATADOG_AGENT_IMAGE%:*}" \ - --parameter datadog-agent-image-tag="${DATADOG_AGENT_IMAGE#*:}" \ - --parameter datadog-cluster-agent-image-repository="${DATADOG_CLUSTER_AGENT_IMAGE%:*}" \ - --parameter datadog-cluster-agent-image-tag="${DATADOG_CLUSTER_AGENT_IMAGE#*:}" \ - --parameter ci_commit_short_sha="${CI_COMMIT_SHORT_SHA:-unknown}" \ - --parameter ci_pipeline_id="${CI_PIPELINE_ID:-unknown}" \ - --parameter ci_job_id="${CI_JOB_ID:-unknown}" || : - ;; -esac - -# we are waiting for the end of the workflow but we don't care about its return code -exit 0 diff --git a/test/e2e/scripts/run-instance/23-argo-get.sh b/test/e2e/scripts/run-instance/23-argo-get.sh deleted file mode 100755 index 1c74939c56a94..0000000000000 --- a/test/e2e/scripts/run-instance/23-argo-get.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash -set -euo pipefail - -printf '=%.0s' {0..79} ; echo - -ARGO_WORKFLOW=${ARGO_WORKFLOW:-''} - -cd "$(dirname "$0")" - -# Wait for any Running workflow -until [[ "$(./argo list --running -o name)" == "No workflows found" ]]; do - sleep 10 -done - -if [[ "$(./argo list -o name)" == "No workflows found" ]]; then - echo "No workflow found" - exit 1 -fi - -if ! locale -k LC_CTYPE | grep -qi 'charmap="utf-\+8"'; then - no_utf8_opt='--no-utf8' -fi - -for workflow in $(./argo list --status Succeeded -o name | grep -v 'No workflows found'); do - # CSPM always gets logs - if [ "$ARGO_WORKFLOW" = "cspm" ]; then - ./argo logs "$workflow" - fi - - ./argo get ${no_utf8_opt:-} "$workflow" -done - -EXIT_CODE=0 -for workflow in $(./argo list --status Failed -o name | grep -v 'No workflows found'); do - ./argo logs "$workflow" - ./argo get ${no_utf8_opt:-} "$workflow" - EXIT_CODE=2 -done - -# Make the Argo UI available from the user -kubectl --namespace argo patch service/argo-server --type json --patch $'[{"op": "replace", "path": "/spec/type", "value": "NodePort"}, {"op": "replace", "path": "/spec/ports", "value": [{"port": 2746, "nodePort": 30001, "targetPort": 2746}]}]' - -# In case of failure, let's keep the VM for 1 day instead of 2 hours for investigation -if [[ $EXIT_CODE != 0 ]]; then - sudo sed -i 's/^OnBootSec=.*/OnBootSec=86400/' /etc/systemd/system/terminate.timer - sudo systemctl daemon-reload - sudo systemctl restart terminate.timer -fi - -TIME_LEFT=$(systemctl status terminate.timer | awk '$1 == "Trigger:" {print gensub(/ *Trigger: (.*)/, "\\1", 1)}') -LOCAL_IP=$(curl -s http://169.254.169.254/2020-10-27/meta-data/local-ipv4) -BEGIN_TS=$(./argo list -o json | jq -r '.[] | .metadata.creationTimestamp' | while read -r ts; do date -d "$ts" +%s; done | sort -n | head -n 1) - -printf "\033[1mThe Argo UI will remain available at \033[1;34mhttps://%s\033[0m until \033[1;33m%s\033[0m.\n" "$LOCAL_IP" "$TIME_LEFT" -printf "\033[1mAll the logs of this job can be found at 
\033[1;34mhttps://dddev.datadoghq.com/logs?query=app%%3Aagent-e2e-tests%%20ci_commit_short_sha%%3A%s%%20ci_pipeline_id%%3A%s%%20ci_job_id%%3A%s&index=dd-agent-ci-e2e&from_ts=%d000&to_ts=%d000&live=false\033[0m.\n" "${CI_COMMIT_SHORT_SHA:-unknown}" "${CI_PIPELINE_ID:-unknown}" "${CI_JOB_ID:-unknown}" "$BEGIN_TS" "$(date +%s)" - -exit ${EXIT_CODE} diff --git a/test/e2e/scripts/run-instance/24-argo-to-ci-setup.sh b/test/e2e/scripts/run-instance/24-argo-to-ci-setup.sh deleted file mode 100755 index a971c9005a4ce..0000000000000 --- a/test/e2e/scripts/run-instance/24-argo-to-ci-setup.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -set -euo pipefail - -cd "$(dirname "$0")" - -docker build -t argo-to-junit-helper:local ./argo-to-junit \ No newline at end of file diff --git a/test/e2e/scripts/run-instance/25-argo-to-ci.sh b/test/e2e/scripts/run-instance/25-argo-to-ci.sh deleted file mode 100755 index 84512ab6a388a..0000000000000 --- a/test/e2e/scripts/run-instance/25-argo-to-ci.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -euo pipefail - -cd "$(dirname "$0")" - -if ! locale -k LC_CTYPE | grep -qi 'charmap="utf-\+8"'; then - no_utf8_opt='--no-utf8' -fi - -mkdir data - -for workflow in $(./argo list -o name | grep -v 'No workflows found'); do - JSON_CRD_FILE=data/$workflow.json - JUNIT_XML_FILE=data/$workflow-junit.xml - ./argo get ${no_utf8_opt:-} "$workflow" --output json > $JSON_CRD_FILE - docker run -v $PWD/data:/data:z argo-to-junit-helper:local /$JSON_CRD_FILE /$JUNIT_XML_FILE - DATADOG_API_KEY=$DD_API_KEY datadog-ci junit upload --service agent-e2e-tests $JUNIT_XML_FILE -done diff --git a/test/e2e/scripts/run-instance/argo-to-junit/Dockerfile b/test/e2e/scripts/run-instance/argo-to-junit/Dockerfile deleted file mode 100644 index 7f380f2a55eee..0000000000000 --- a/test/e2e/scripts/run-instance/argo-to-junit/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM gcr.io/google-containers/python:3.5.1-alpine - -COPY requirements.txt argo_to_junit.py entrypoint.sh / -RUN pip3 install -r requirements.txt - -ENTRYPOINT ["/entrypoint.sh"] diff --git a/test/e2e/scripts/run-instance/argo-to-junit/argo_to_junit.py b/test/e2e/scripts/run-instance/argo-to-junit/argo_to_junit.py deleted file mode 100755 index 7e1d75c94517b..0000000000000 --- a/test/e2e/scripts/run-instance/argo-to-junit/argo_to_junit.py +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env python3 - -import json -from argparse import ArgumentParser -from datetime import datetime - -from junit_xml import TestCase, TestSuite - - -def _str_to_datetime(date_str): - return datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%SZ') - - -def _generate_test_suites(root_name, argo_nodes): - """ - Groups argo nodes by parents, generate the test cases - and yields the corresponding test suites. 
- """ - for node_id, node_status in argo_nodes.items(): - if node_status.get("type") in ["StepGroup", "DAG"]: - test_cases = [] - tc = TestCase(node_status.get("displayName", node_id)) - children = node_status.get("children", []) - for child_id in children: - child_status = argo_nodes.get(child_id, None) - if not child_status or child_status.get("type") != "Pod": - continue - children.extend(child_status.get("children", [])) - end = _str_to_datetime(child_status.get("finishedAt")) - start = _str_to_datetime(child_status.get("startedAt")) - job_duration = (end - start).total_seconds() - tc = TestCase(child_status.get("displayName"), elapsed_sec=job_duration) - if child_status.get("phase") == "Failed": - tc.add_failure_info(child_status.get("message")) - test_cases.append(tc) - if len(test_cases) == 0: - continue - parent_name = argo_nodes.get(node_status.get("boundaryID")).get("displayName") - # Some steps are tied directly to the root workflow (i.e the parent is argo-datadog-agent-*) - # Thus, we use a deterministic format to generate the test suite name in that case. - ts_name = parent_name if parent_name != root_name else "root" + "/" + node_status.get("displayName") - yield TestSuite(ts_name, test_cases) - - -def main(): - parser = ArgumentParser() - parser.add_argument("-i", "--input-file", help="File containing the Argo CRD in JSON", required=True) - parser.add_argument("-o", "--output-file", default="junit.xml", help="The junit xml file") - args = parser.parse_args() - - with open(args.input_file) as f: - crd = json.loads(f.read()) - crd_name = crd.get("metadata", {}).get("name") - nodes = crd.get("status", {}).get("nodes") - if not crd_name or not nodes: - print(json.dumps(crd)) - raise Exception("Incompatible CRD") - - test_suites = [] - for ts in _generate_test_suites(crd_name, nodes): - test_suites.append(ts) - with open(args.output_file, "w") as f: - TestSuite.to_file(f, test_suites) - - -if __name__ == "__main__": - main() diff --git a/test/e2e/scripts/run-instance/argo-to-junit/entrypoint.sh b/test/e2e/scripts/run-instance/argo-to-junit/entrypoint.sh deleted file mode 100755 index 72f1650ada344..0000000000000 --- a/test/e2e/scripts/run-instance/argo-to-junit/entrypoint.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh -set -e - -if [ "$#" -ne 2 ]; then - /argo_to_junit.py --help - exit 1 -fi - -/argo_to_junit.py --input-file $1 --output-file $2 diff --git a/test/e2e/scripts/run-instance/argo-to-junit/requirements.txt b/test/e2e/scripts/run-instance/argo-to-junit/requirements.txt deleted file mode 100644 index 37ea29569761f..0000000000000 --- a/test/e2e/scripts/run-instance/argo-to-junit/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -junit-xml==1.9 diff --git a/test/e2e/scripts/run-instance/argo.amd64.sha256sum b/test/e2e/scripts/run-instance/argo.amd64.sha256sum deleted file mode 100644 index de69f0053c844..0000000000000 --- a/test/e2e/scripts/run-instance/argo.amd64.sha256sum +++ /dev/null @@ -1 +0,0 @@ -834a1cc0972a8810dfc39469b176d4dead17b0bc29968974da52d89b59357ac2 argo.gz \ No newline at end of file diff --git a/test/e2e/scripts/run-instance/argo.arm64.sha256sum b/test/e2e/scripts/run-instance/argo.arm64.sha256sum deleted file mode 100644 index ed3d0797a7dfd..0000000000000 --- a/test/e2e/scripts/run-instance/argo.arm64.sha256sum +++ /dev/null @@ -1 +0,0 @@ -e54086fd80f2e5de1c4ea9e7b935565b4404233ea4c96264055a7e16e85c376c argo.gz \ No newline at end of file diff --git a/test/e2e/scripts/run-instance/kind-cluster.yaml b/test/e2e/scripts/run-instance/kind-cluster.yaml deleted file 
mode 100644 index 4a8aed1991464..0000000000000 --- a/test/e2e/scripts/run-instance/kind-cluster.yaml +++ /dev/null @@ -1,18 +0,0 @@ -kind: Cluster -apiVersion: kind.x-k8s.io/v1alpha4 -nodes: -- role: control-plane - extraMounts: - - containerPath: /var/lib/kubelet/config.json - hostPath: /root/.docker/config.json - - containerPath: /host/datadog-agent - hostPath: /home/core/datadog-agent - - containerPath: /host/proc - hostPath: /proc - extraPortMappings: - - containerPort: 30001 - hostPort: 443 -containerdConfigPatches: - - |- - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://mirror.gcr.io", "https://registry-1.docker.io"] diff --git a/test/e2e/scripts/setup-instance/.gitignore b/test/e2e/scripts/setup-instance/.gitignore deleted file mode 100644 index 680a4486820dd..0000000000000 --- a/test/e2e/scripts/setup-instance/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -fcct-* -kind -kubectl -kubectl.sha256 -fedora.gpg -butane-* diff --git a/test/e2e/scripts/setup-instance/00-entrypoint-dev.sh b/test/e2e/scripts/setup-instance/00-entrypoint-dev.sh deleted file mode 100755 index bf1c60c084704..0000000000000 --- a/test/e2e/scripts/setup-instance/00-entrypoint-dev.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -printf '=%.0s' {0..79} ; echo - -BASE64_FLAGS="-w 0" -# OSX with 2 types of base64 binary in PATH ... -if [[ $(uname) == "Darwin" ]] -then - echo "Currently running over Darwin" - # shellcheck disable=SC2086 - echo "osx base64" | base64 ${BASE64_FLAGS} || { - echo "current base64 binary does not support ${BASE64_FLAGS}" - BASE64_FLAGS="" - } -fi - -set -e - -cd "$(dirname "$0")" - -git clean -fdx . - -# Generate ssh-key and ignition files -./01-ignition.sh -# shellcheck disable=SC2086 -IGNITION_BASE64=$(base64 ${BASE64_FLAGS} ignition.json) - -REGION="${REGION:-us-east-1}" -UPDATE_STREAM="${UPDATE_STREAM:-stable}" -AMI="$(curl "https://builds.coreos.fedoraproject.org/streams/${UPDATE_STREAM}.json" | jq -r ".architectures.x86_64.images.aws.regions.\"$REGION\".image")" - -tee specification.json << EOF -{ - "ImageId": "${AMI}", - "InstanceType": "t3.2xlarge", - "Monitoring": { - "Enabled": false - }, - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "DeleteOnTermination": true, - "VolumeSize": 50, - "VolumeType": "gp2" - } - } - ], - "UserData": "${IGNITION_BASE64}", - - "SubnetId": "subnet-b89e00e2", - "SecurityGroupIds": ["sg-7fedd80a"] -} -EOF - -export CI_COMMIT_SHORT_SHA=${CI_COMMIT_SHORT_SHA:-$(git describe --tags --always --dirty --match 7.\*)} - -exec ./02-ec2.sh diff --git a/test/e2e/scripts/setup-instance/00-entrypoint-gitlab.sh b/test/e2e/scripts/setup-instance/00-entrypoint-gitlab.sh deleted file mode 100755 index 7279be3fabb27..0000000000000 --- a/test/e2e/scripts/setup-instance/00-entrypoint-gitlab.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -printf '=%.0s' {0..79} ; echo - -cd "$(dirname "$0")" - -git clean -fdx . 
- -# Generate ssh-key and ignition files -./01-ignition.sh -IGNITION_BASE64=$(base64 -w 0 ignition.json) - -REGION="${REGION:-us-east-1}" -UPDATE_STREAM="${UPDATE_STREAM:-stable}" -if [ -z "${AMI+x}" ]; then - AMI="$(curl "https://builds.coreos.fedoraproject.org/streams/${UPDATE_STREAM}.json" | jq -r ".architectures.x86_64.images.aws.regions.\"$REGION\".image")" -fi -ARGO_WORKFLOW=${ARGO_WORKFLOW:-''} - -# TODO remove the IamInstanceProfile -tee specification.json << EOF -{ - "ImageId": "${AMI}", - "InstanceType": "t3.2xlarge", - "Monitoring": { - "Enabled": false - }, - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "DeleteOnTermination": true, - "VolumeSize": 50, - "VolumeType": "gp2" - } - } - ], - "UserData": "${IGNITION_BASE64}", - - "SubnetId": "subnet-05d7c6b1b5cfea811", - "IamInstanceProfile": { - "Name": "ci-datadog-agent-e2e-runner" - }, - "SecurityGroupIds": ["sg-019917348cb0eb7e7"] -} -EOF - -echo "Running inside a gitlab pipeline," -echo "using DATADOG_AGENT_IMAGE=${DATADOG_AGENT_IMAGE}" -echo "using DATADOG_CLUSTER_AGENT_IMAGE=${DATADOG_CLUSTER_AGENT_IMAGE}" -echo "using ARGO_WORKFLOW=${ARGO_WORKFLOW}" - -# Check if the image is hosted on a docker registry and if it's available -echo "${DATADOG_AGENT_IMAGE} is hosted on a docker registry, checking if it's available" -IMAGE_REPOSITORY=${DATADOG_AGENT_IMAGE%:*} -IMAGE_TAG=${DATADOG_AGENT_IMAGE#*:} -if ! curl -Lfs --head "https://hub.docker.com/v2/repositories/${IMAGE_REPOSITORY}/tags/${IMAGE_TAG}" > /dev/null ; then - echo "The DATADOG_AGENT_IMAGE=${DATADOG_AGENT_IMAGE} is not available on DockerHub" - echo "Ensure that the manual jobs in dev_container_deploy has been run/rerun" - echo "*dev_branch* -> k8s-e2e-*-dev" - echo "*dev_master* -> k8s-e2e-*-main" - exit 2 -fi - -echo "${DATADOG_CLUSTER_AGENT_IMAGE} is hosted on a docker registry, checking if it's available" -IMAGE_REPOSITORY=${DATADOG_CLUSTER_AGENT_IMAGE%:*} -IMAGE_TAG=${DATADOG_CLUSTER_AGENT_IMAGE#*:} -if ! 
curl -Lfs --head "https://hub.docker.com/v2/repositories/${IMAGE_REPOSITORY}/tags/${IMAGE_TAG}" > /dev/null ; then - echo "The DATADOG_CLUSTER_AGENT_IMAGE=${DATADOG_CLUSTER_AGENT_IMAGE} is not available on DockerHub" - echo "Ensure that the manual jobs in dev_container_deploy has been run/rerun" - echo "*dev_branch* -> k8s-e2e-*-dev" - echo "*dev_master* -> k8s-e2e-*-main" - exit 2 -fi - -exec ./02-ec2.sh diff --git a/test/e2e/scripts/setup-instance/00-entrypoint-local.sh b/test/e2e/scripts/setup-instance/00-entrypoint-local.sh deleted file mode 100755 index 6bf51fd15970b..0000000000000 --- a/test/e2e/scripts/setup-instance/00-entrypoint-local.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -printf '=%.0s' {0..79} ; echo -set -e -cd "$(dirname "$0")" - -../run-instance/10-setup-kind.sh -../run-instance/11-setup-kind-cluster.sh -../run-instance/20-argo-download.sh -../run-instance/21-argo-setup.sh -../run-instance/22-argo-submit.sh -../run-instance/23-argo-get.sh diff --git a/test/e2e/scripts/setup-instance/01-ignition.sh b/test/e2e/scripts/setup-instance/01-ignition.sh deleted file mode 100755 index 870d85ddeb4b7..0000000000000 --- a/test/e2e/scripts/setup-instance/01-ignition.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -printf '=%.0s' {0..79} ; echo - -cd "$(dirname "$0")" -ssh-keygen -b 4096 -t rsa -C "datadog" -N "" -f "id_rsa" -SSH_RSA=$(cat id_rsa.pub) - -arch=$(uname -m) -if [ "$arch" = "arm64" ]; -then - arch="aarch64" -fi - -case "$(uname)" in - Linux) butane="butane-$arch-unknown-linux-gnu";; - Darwin) butane="butane-$arch-apple-darwin";; -esac - -curl -O "https://fedoraproject.org/fedora.gpg" -curl -LOC - "https://github.com/coreos/butane/releases/download/v0.20.0/${butane}" -curl -LO "https://github.com/coreos/butane/releases/download/v0.20.0/${butane}.asc" - -gpgv --keyring ./fedora.gpg "${butane}.asc" "$butane" -chmod +x "$butane" - -"./$butane" --pretty --strict < -generate_namespace() -{ - # Generate unique namespace from workflow_group and workflow - # namespace format: --- - echo 'Info: Generating namespace...' - PREFIX=$1-$2 - # `_` and `.` are not allowed in namespace names, replace them with `-` - PREFIX=${PREFIX//[_.]/-} - CHECK_SUM=$(echo -n "$PREFIX" | md5sum | cut -c1-15) - NAMESPACE=$PREFIX-$CHECK_SUM - if ! [[ $NAMESPACE =~ ^[0-9a-zA-Z-]+$ ]]; then - echo "Error: Invalid namespace format: $NAMESPACE" - exit 1 - fi - echo "Info: Generated namespace: $NAMESPACE" -} - -# Usage: check_yq_installed -check_yq_installed() -{ - if ! [ -x "$(command -v yq)" ]; then - echo 'Error: yq is not installed.' 
- exit 1 - fi -} \ No newline at end of file diff --git a/test/fakeintake/Dockerfile b/test/fakeintake/Dockerfile index 4e2b2eb9ab7d7..aa77ba5a45231 100644 --- a/test/fakeintake/Dockerfile +++ b/test/fakeintake/Dockerfile @@ -2,7 +2,7 @@ # syntax=docker/dockerfile:1 ## Build -FROM golang:1.22.6-alpine3.20 AS build +FROM golang:1.22.7-alpine3.20 AS build # need gcc to build with CGO_ENABLED=1 # need musl-dev to get stdlib.h diff --git a/test/fakeintake/client/client.go b/test/fakeintake/client/client.go index 54df7e45a9df8..d31e22c992628 100644 --- a/test/fakeintake/client/client.go +++ b/test/fakeintake/client/client.go @@ -784,7 +784,11 @@ func (c *Client) get(route string) ([]byte, error) { defer tmpResp.Body.Close() if tmpResp.StatusCode != http.StatusOK { - return fmt.Errorf("expected %d got %d", http.StatusOK, tmpResp.StatusCode) + var errStr string + if errBody, _ := io.ReadAll(tmpResp.Body); len(errBody) > 0 { + errStr = string(errBody) + } + return fmt.Errorf("expected %d got %d: %s", http.StatusOK, tmpResp.StatusCode, errStr) } // If strictFakeintakeIDCheck is enabled, we check that the fakeintake ID is the same as the one we expect // If the fakeintake ID is not set yet we set the one we get from the first request diff --git a/test/integration/config_providers/etcd/etcd_provider_test.go b/test/integration/config_providers/etcd/etcd_provider_test.go index 228495b66e077..1d481a49695e9 100644 --- a/test/integration/config_providers/etcd/etcd_provider_test.go +++ b/test/integration/config_providers/etcd/etcd_provider_test.go @@ -17,8 +17,8 @@ import ( etcd_client "go.etcd.io/etcd/client/v2" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/test/integration/utils" ) @@ -142,7 +142,7 @@ func (suite *EtcdTestSuite) toggleEtcdAuth(enable bool) { func (suite *EtcdTestSuite) TestWorkingConnectionAnon() { ctx := context.Background() - config := config.ConfigurationProviders{ + config := pkgconfigsetup.ConfigurationProviders{ TemplateURL: suite.etcdURL, TemplateDir: "/foo", } @@ -163,7 +163,7 @@ func (suite *EtcdTestSuite) TestWorkingConnectionAnon() { func (suite *EtcdTestSuite) TestBadConnection() { ctx := context.Background() - config := config.ConfigurationProviders{ + config := pkgconfigsetup.ConfigurationProviders{ TemplateURL: "http://127.0.0.1:1337", TemplateDir: "/foo", } @@ -178,7 +178,7 @@ func (suite *EtcdTestSuite) TestBadConnection() { func (suite *EtcdTestSuite) TestWorkingAuth() { ctx := context.Background() suite.toggleEtcdAuth(true) - config := config.ConfigurationProviders{ + config := pkgconfigsetup.ConfigurationProviders{ TemplateURL: suite.etcdURL, TemplateDir: "/foo", Username: etcdUser, @@ -195,7 +195,7 @@ func (suite *EtcdTestSuite) TestWorkingAuth() { func (suite *EtcdTestSuite) TestBadAuth() { ctx := context.Background() suite.toggleEtcdAuth(true) - config := config.ConfigurationProviders{ + config := pkgconfigsetup.ConfigurationProviders{ TemplateURL: suite.etcdURL, TemplateDir: "/foo", Username: etcdUser, diff --git a/test/integration/config_providers/zookeeper/zookeeper_provider_test.go b/test/integration/config_providers/zookeeper/zookeeper_provider_test.go index fd230819aff43..b9e3aab142edc 100644 --- a/test/integration/config_providers/zookeeper/zookeeper_provider_test.go +++ 
b/test/integration/config_providers/zookeeper/zookeeper_provider_test.go @@ -16,7 +16,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/test/integration/utils" ) @@ -69,7 +69,7 @@ type ZkTestSuite struct { containerName string zkVersion string zkURL string - providerConfig config.ConfigurationProviders + providerConfig pkgconfigsetup.ConfigurationProviders compose *utils.ComposeConf } @@ -110,7 +110,7 @@ func (suite *ZkTestSuite) TearDownSuite() { // put configuration back in a known state before each test func (suite *ZkTestSuite) SetupTest() { - suite.providerConfig = config.ConfigurationProviders{ + suite.providerConfig = pkgconfigsetup.ConfigurationProviders{ TemplateURL: suite.zkURL, TemplateDir: "/datadog/check_configs", } diff --git a/test/integration/corechecks/docker/main_test.go b/test/integration/corechecks/docker/main_test.go index 1289154aed688..5d2d8a73ebff5 100644 --- a/test/integration/corechecks/docker/main_test.go +++ b/test/integration/corechecks/docker/main_test.go @@ -30,9 +30,10 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" "github.com/DataDog/datadog-agent/pkg/collector/check" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/containers/docker" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" "github.com/DataDog/datadog-agent/pkg/util/optional" "github.com/DataDog/datadog-agent/test/integration/utils" ) @@ -70,14 +71,15 @@ var ( func TestMain(m *testing.M) { flag.Parse() - config.SetupLogger( - config.LoggerName("test"), + pkglogsetup.SetupLogger( + pkglogsetup.LoggerName("test"), "debug", "", "", false, true, false, + pkgconfigsetup.Datadog(), ) retryTicker := time.NewTicker(time.Duration(*retryDelay) * time.Second) @@ -116,12 +118,12 @@ type testDeps struct { // Called before for first test run: compose up func setup() (workloadmeta.Component, error) { // Setup global conf - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(datadogCfgString)) + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(datadogCfgString)) if err != nil { return nil, err } - config.SetFeaturesNoCleanup(env.Docker) + env.SetFeaturesNoCleanup(env.Docker) // Note: workloadmeta will be started by fx with the App var deps testDeps diff --git a/test/integration/dogstatsd/origin_detection.go b/test/integration/dogstatsd/origin_detection.go index 0b2e87e5d6fa3..31b99916edfd3 100644 --- a/test/integration/dogstatsd/origin_detection.go +++ b/test/integration/dogstatsd/origin_detection.go @@ -27,7 +27,6 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap" "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap/pidmapimpl" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -43,7 +42,7 @@ const ( // we can't just `netcat` to the socket, that's why we run a custom python // script that 
will stay up after sending packets. func testUDSOriginDetection(t *testing.T, network string) { - coreConfig.SetFeatures(t, env.Docker) + env.SetFeatures(t, env.Docker) cfg := map[string]any{} diff --git a/test/integration/dogstatsd/origin_detection_test.go b/test/integration/dogstatsd/origin_detection_test.go index 3216f7c9ad438..1f158aea9612e 100644 --- a/test/integration/dogstatsd/origin_detection_test.go +++ b/test/integration/dogstatsd/origin_detection_test.go @@ -8,32 +8,35 @@ package dogstatsd import ( "testing" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) func TestUDSOriginDetectionDatagram(t *testing.T) { - config.SetupLogger( - config.LoggerName("test"), + pkglogsetup.SetupLogger( + pkglogsetup.LoggerName("test"), "debug", "", "", false, true, false, + pkgconfigsetup.Datadog(), ) testUDSOriginDetection(t, "unixgram") } func TestUDSOriginDetectionStream(t *testing.T) { - config.SetupLogger( - config.LoggerName("test"), + pkglogsetup.SetupLogger( + pkglogsetup.LoggerName("test"), "debug", "", "", false, true, false, + pkgconfigsetup.Datadog(), ) testUDSOriginDetection(t, "unix") diff --git a/test/integration/listeners/docker/docker_listener_test.go b/test/integration/listeners/docker/docker_listener_test.go index 739cdea88ff58..33049fe225c3b 100644 --- a/test/integration/listeners/docker/docker_listener_test.go +++ b/test/integration/listeners/docker/docker_listener_test.go @@ -31,11 +31,12 @@ import ( wmcatalog "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/catalog" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafx "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" "github.com/DataDog/datadog-agent/pkg/util/optional" "github.com/DataDog/datadog-agent/test/integration/utils" ) @@ -63,14 +64,15 @@ type deps struct { func (suite *DockerListenerTestSuite) SetupSuite() { containers.ResetSharedFilter() - config.SetupLogger( - config.LoggerName("test"), + pkglogsetup.SetupLogger( + pkglogsetup.LoggerName("test"), "debug", "", "", false, true, false, + pkgconfigsetup.Datadog(), ) overrides := map[string]interface{}{ @@ -106,7 +108,7 @@ func (suite *DockerListenerTestSuite) TearDownSuite() { } func (suite *DockerListenerTestSuite) SetupTest() { - dl, err := listeners.NewContainerListener(&config.Listeners{}, optional.NewOption(suite.wmeta), suite.telemetryStore) + dl, err := listeners.NewContainerListener(&pkgconfigsetup.Listeners{}, optional.NewOption(suite.wmeta), suite.telemetryStore) if err != nil { panic(err) } diff --git a/test/integration/serverless/snapshots/error-csharp b/test/integration/serverless/snapshots/error-csharp index c030934e86cae..eedb59e5c8b87 100644 --- a/test/integration/serverless/snapshots/error-csharp +++ b/test/integration/serverless/snapshots/error-csharp @@ -533,6 +533,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + 
"architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp", + "functionname:integration-tests-extension-XXXXXX-error-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp", + "functionname:integration-tests-extension-XXXXXX-error-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp", + "functionname:integration-tests-extension-XXXXXX-error-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp", + "functionname:integration-tests-extension-XXXXXX-error-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], @@ -1056,6 +1152,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp", + "functionname:integration-tests-extension-XXXXXX-error-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], 
+ "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp", + "functionname:integration-tests-extension-XXXXXX-error-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp", + "functionname:integration-tests-extension-XXXXXX-error-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp", + "functionname:integration-tests-extension-XXXXXX-error-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/error-java b/test/integration/serverless/snapshots/error-java index 9aaf96d543760..61b596cc46b25 100644 --- a/test/integration/serverless/snapshots/error-java +++ b/test/integration/serverless/snapshots/error-java @@ -533,6 +533,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java", + "functionname:integration-tests-extension-XXXXXX-error-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java", + 
"functionname:integration-tests-extension-XXXXXX-error-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java", + "functionname:integration-tests-extension-XXXXXX-error-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java", + "functionname:integration-tests-extension-XXXXXX-error-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], @@ -1056,6 +1152,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java", + "functionname:integration-tests-extension-XXXXXX-error-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java", + "functionname:integration-tests-extension-XXXXXX-error-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + 
"function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java", + "functionname:integration-tests-extension-XXXXXX-error-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java", + "functionname:integration-tests-extension-XXXXXX-error-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/error-node b/test/integration/serverless/snapshots/error-node index 58d2a83a13bd6..2061aaf660e7e 100644 --- a/test/integration/serverless/snapshots/error-node +++ b/test/integration/serverless/snapshots/error-node @@ -537,6 +537,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node", + "functionname:integration-tests-extension-XXXXXX-error-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node", + "functionname:integration-tests-extension-XXXXXX-error-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node", + "functionname:integration-tests-extension-XXXXXX-error-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + 
"tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node", + "functionname:integration-tests-extension-XXXXXX-error-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], @@ -1060,6 +1156,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node", + "functionname:integration-tests-extension-XXXXXX-error-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node", + "functionname:integration-tests-extension-XXXXXX-error-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node", + "functionname:integration-tests-extension-XXXXXX-error-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node", + "functionname:integration-tests-extension-XXXXXX-error-node", + "memorysize:1024", + "region:eu-west-1", + 
"resource:integration-tests-extension-XXXXXX-error-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/error-proxy b/test/integration/serverless/snapshots/error-proxy index caf6b1aeedfb3..00fe9b830d005 100644 --- a/test/integration/serverless/snapshots/error-proxy +++ b/test/integration/serverless/snapshots/error-proxy @@ -533,6 +533,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy", + "functionname:integration-tests-extension-XXXXXX-error-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy", + "functionname:integration-tests-extension-XXXXXX-error-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy", + "functionname:integration-tests-extension-XXXXXX-error-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy", + "functionname:integration-tests-extension-XXXXXX-error-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], @@ -1056,6 +1152,102 @@ "version:integration-tests-version" ] }, + { + 
"distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy", + "functionname:integration-tests-extension-XXXXXX-error-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy", + "functionname:integration-tests-extension-XXXXXX-error-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy", + "functionname:integration-tests-extension-XXXXXX-error-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy", + "functionname:integration-tests-extension-XXXXXX-error-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/error-python b/test/integration/serverless/snapshots/error-python index e2c1e888a085c..fdb7620cf1e98 100644 --- a/test/integration/serverless/snapshots/error-python +++ b/test/integration/serverless/snapshots/error-python @@ -539,6 +539,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + 
"function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python", + "functionname:integration-tests-extension-XXXXXX-error-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python", + "functionname:integration-tests-extension-XXXXXX-error-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python", + "functionname:integration-tests-extension-XXXXXX-error-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python", + "functionname:integration-tests-extension-XXXXXX-error-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], @@ -1062,6 +1158,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python", + "functionname:integration-tests-extension-XXXXXX-error-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + 
"architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python", + "functionname:integration-tests-extension-XXXXXX-error-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python", + "functionname:integration-tests-extension-XXXXXX-error-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python", + "functionname:integration-tests-extension-XXXXXX-error-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-csharp b/test/integration/serverless/snapshots/metric-csharp index e8212c0f4a253..92fb2db40cac3 100644 --- a/test/integration/serverless/snapshots/metric-csharp +++ b/test/integration/serverless/snapshots/metric-csharp @@ -485,6 +485,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp", + "functionname:integration-tests-extension-XXXXXX-metric-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp", + "functionname:integration-tests-extension-XXXXXX-metric-csharp", + "memorysize:1024", + 
"region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp", + "functionname:integration-tests-extension-XXXXXX-metric-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp", + "functionname:integration-tests-extension-XXXXXX-metric-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], @@ -1008,6 +1104,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp", + "functionname:integration-tests-extension-XXXXXX-metric-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp", + "functionname:integration-tests-extension-XXXXXX-metric-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + 
"function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp", + "functionname:integration-tests-extension-XXXXXX-metric-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp", + "functionname:integration-tests-extension-XXXXXX-metric-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-go b/test/integration/serverless/snapshots/metric-go index 24046cf6d6953..4abcdeff4f064 100644 --- a/test/integration/serverless/snapshots/metric-go +++ b/test/integration/serverless/snapshots/metric-go @@ -485,6 +485,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go", + "functionname:integration-tests-extension-XXXXXX-metric-go", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-go", + "runtime:provided.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go", + "functionname:integration-tests-extension-XXXXXX-metric-go", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-go", + "runtime:provided.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go", + "functionname:integration-tests-extension-XXXXXX-metric-go", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-go", + "runtime:provided.al2", + "service:integration-tests-service", + "taga:valuea", + 
"tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go", + "functionname:integration-tests-extension-XXXXXX-metric-go", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-go", + "runtime:provided.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], @@ -1008,6 +1104,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go", + "functionname:integration-tests-extension-XXXXXX-metric-go", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-go", + "runtime:provided.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go", + "functionname:integration-tests-extension-XXXXXX-metric-go", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-go", + "runtime:provided.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go", + "functionname:integration-tests-extension-XXXXXX-metric-go", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-go", + "runtime:provided.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go", + "functionname:integration-tests-extension-XXXXXX-metric-go", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-go", + 
"runtime:provided.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-java b/test/integration/serverless/snapshots/metric-java index bbb3debe64b28..2f7655ad42374 100644 --- a/test/integration/serverless/snapshots/metric-java +++ b/test/integration/serverless/snapshots/metric-java @@ -485,6 +485,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java", + "functionname:integration-tests-extension-XXXXXX-metric-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java", + "functionname:integration-tests-extension-XXXXXX-metric-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java", + "functionname:integration-tests-extension-XXXXXX-metric-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java", + "functionname:integration-tests-extension-XXXXXX-metric-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], @@ -1008,6 +1104,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": 
"aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java", + "functionname:integration-tests-extension-XXXXXX-metric-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java", + "functionname:integration-tests-extension-XXXXXX-metric-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java", + "functionname:integration-tests-extension-XXXXXX-metric-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java", + "functionname:integration-tests-extension-XXXXXX-metric-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-node b/test/integration/serverless/snapshots/metric-node index 8bc4b04fa27e1..3576559a1be30 100644 --- a/test/integration/serverless/snapshots/metric-node +++ b/test/integration/serverless/snapshots/metric-node @@ -485,6 +485,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node", + 
"functionname:integration-tests-extension-XXXXXX-metric-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node", + "functionname:integration-tests-extension-XXXXXX-metric-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node", + "functionname:integration-tests-extension-XXXXXX-metric-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node", + "functionname:integration-tests-extension-XXXXXX-metric-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], @@ -1008,6 +1104,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node", + "functionname:integration-tests-extension-XXXXXX-metric-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + 
"function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node", + "functionname:integration-tests-extension-XXXXXX-metric-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node", + "functionname:integration-tests-extension-XXXXXX-metric-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node", + "functionname:integration-tests-extension-XXXXXX-metric-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-proxy b/test/integration/serverless/snapshots/metric-proxy index 260310cc549e9..4948ad17b6646 100644 --- a/test/integration/serverless/snapshots/metric-proxy +++ b/test/integration/serverless/snapshots/metric-proxy @@ -485,6 +485,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy", + "functionname:integration-tests-extension-XXXXXX-metric-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy", + "functionname:integration-tests-extension-XXXXXX-metric-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-proxy", + "runtime:nodejs18.x", + 
"service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy", + "functionname:integration-tests-extension-XXXXXX-metric-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy", + "functionname:integration-tests-extension-XXXXXX-metric-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], @@ -1008,6 +1104,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy", + "functionname:integration-tests-extension-XXXXXX-metric-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy", + "functionname:integration-tests-extension-XXXXXX-metric-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy", + "functionname:integration-tests-extension-XXXXXX-metric-proxy", + "memorysize:1024", 
+ "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy", + "functionname:integration-tests-extension-XXXXXX-metric-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-python b/test/integration/serverless/snapshots/metric-python index f2c2492f9ae59..b630d6bc3fa25 100644 --- a/test/integration/serverless/snapshots/metric-python +++ b/test/integration/serverless/snapshots/metric-python @@ -485,6 +485,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python", + "functionname:integration-tests-extension-XXXXXX-metric-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python", + "functionname:integration-tests-extension-XXXXXX-metric-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python", + "functionname:integration-tests-extension-XXXXXX-metric-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": 
"aws.lambda.enhanced.fd_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python", + "functionname:integration-tests-extension-XXXXXX-metric-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], @@ -1008,6 +1104,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python", + "functionname:integration-tests-extension-XXXXXX-metric-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python", + "functionname:integration-tests-extension-XXXXXX-metric-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python", + "functionname:integration-tests-extension-XXXXXX-metric-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python", + "functionname:integration-tests-extension-XXXXXX-metric-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + 
"tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/util/kube_apiserver/apiserver_test.go b/test/integration/util/kube_apiserver/apiserver_test.go index 6cdf7cee2b3e3..784fdf6072467 100644 --- a/test/integration/util/kube_apiserver/apiserver_test.go +++ b/test/integration/util/kube_apiserver/apiserver_test.go @@ -22,7 +22,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" @@ -45,7 +44,7 @@ func TestSuiteKube(t *testing.T) { s := &testSuite{} // Env detection - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) // Start compose stack compose, err := initAPIServerCompose() diff --git a/test/integration/util/kubelet/insecurekubelet_test.go b/test/integration/util/kubelet/insecurekubelet_test.go index 2206846f31c5a..776f7a771e7c0 100644 --- a/test/integration/util/kubelet/insecurekubelet_test.go +++ b/test/integration/util/kubelet/insecurekubelet_test.go @@ -16,7 +16,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" @@ -102,7 +101,7 @@ func (suite *InsecureTestSuite) TestInsecureHTTPS() { } func TestInsecureKubeletSuite(t *testing.T) { - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) compose, err := initInsecureKubelet() require.Nil(t, err) diff --git a/test/integration/util/kubelet/securekubelet_test.go b/test/integration/util/kubelet/securekubelet_test.go index 58c016510f3d7..90124ac5e4282 100644 --- a/test/integration/util/kubelet/securekubelet_test.go +++ b/test/integration/util/kubelet/securekubelet_test.go @@ -17,7 +17,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" @@ -141,7 +140,7 @@ func (suite *SecureTestSuite) TestTLSWithCACertificate() { } func TestSecureKubeletSuite(t *testing.T) { - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) compose, certsConfig, err := initSecureKubelet() defer os.Remove(certsConfig.CertFilePath) diff --git a/test/integration/util/leaderelection/leaderelection_test.go b/test/integration/util/leaderelection/leaderelection_test.go index 292e133165a73..317a40e04edea 100644 --- a/test/integration/util/leaderelection/leaderelection_test.go +++ b/test/integration/util/leaderelection/leaderelection_test.go @@ -21,7 +21,6 @@ import ( "testing" "time" - "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" log "github.com/cihub/seelog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -31,7 +30,8 @@ import ( "k8s.io/client-go/tools/clientcmd" rl "k8s.io/client-go/tools/leaderelection/resourcelock" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" + "github.com/DataDog/datadog-agent/pkg/config/env" configmock 
"github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" @@ -111,7 +111,7 @@ func TestSuiteAPIServer(t *testing.T) { }() mockConfig := configmock.New(t) - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) mockConfig.SetWithoutSource("leader_election_default_resource", tt.leaderElectionDefaultResource) // Start compose stack diff --git a/test/kitchen/tasks/clean.sh b/test/kitchen/tasks/clean.sh index 3aa774275039f..c351060cc08ca 100755 --- a/test/kitchen/tasks/clean.sh +++ b/test/kitchen/tasks/clean.sh @@ -8,19 +8,19 @@ set -euo pipefail # These should not be printed out if [ -z ${AZURE_CLIENT_ID+x} ]; then - AZURE_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID_SSM_NAME) + AZURE_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_ID) export AZURE_CLIENT_ID fi if [ -z ${AZURE_CLIENT_SECRET+x} ]; then - AZURE_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET_SSM_NAME) + AZURE_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_SECRET) export AZURE_CLIENT_SECRET fi if [ -z ${AZURE_TENANT_ID+x} ]; then - AZURE_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID_SSM_NAME) + AZURE_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_TENANT_ID) export AZURE_TENANT_ID fi if [ -z ${AZURE_SUBSCRIPTION_ID+x} ]; then - AZURE_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID_SSM_NAME) + AZURE_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_SUBSCRIPTION_ID) export AZURE_SUBSCRIPTION_ID fi if [ -z ${DD_PIPELINE_ID+x} ]; then diff --git a/test/kitchen/tasks/run-test-kitchen.sh b/test/kitchen/tasks/run-test-kitchen.sh index a88290161363e..6e51d4013da71 100755 --- a/test/kitchen/tasks/run-test-kitchen.sh +++ b/test/kitchen/tasks/run-test-kitchen.sh @@ -54,25 +54,25 @@ if [ "$KITCHEN_PROVIDER" == "azure" ]; then # These should not be printed out set +x if [ -z ${AZURE_CLIENT_ID+x} ]; then - AZURE_CLIENT_ID=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID_SSM_NAME) + AZURE_CLIENT_ID=$($PARENT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_ID) # make sure whitespace is removed AZURE_CLIENT_ID="$(echo -e "${AZURE_CLIENT_ID}" | tr -d '[:space:]')" export AZURE_CLIENT_ID fi if [ -z ${AZURE_CLIENT_SECRET+x} ]; then - AZURE_CLIENT_SECRET=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET_SSM_NAME) + AZURE_CLIENT_SECRET=$($PARENT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_SECRET) # make sure whitespace is removed AZURE_CLIENT_SECRET="$(echo -e "${AZURE_CLIENT_SECRET}" | tr -d '[:space:]')" export AZURE_CLIENT_SECRET fi if [ -z ${AZURE_TENANT_ID+x} ]; then - AZURE_TENANT_ID=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID_SSM_NAME) + AZURE_TENANT_ID=$($PARENT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_TENANT_ID) # make sure whitespace is removed AZURE_TENANT_ID="$(echo -e "${AZURE_TENANT_ID}" | tr -d '[:space:]')" export AZURE_TENANT_ID fi if [ -z ${AZURE_SUBSCRIPTION_ID+x} ]; then - AZURE_SUBSCRIPTION_ID=$($PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID_SSM_NAME) + AZURE_SUBSCRIPTION_ID=$($PARENT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_SUBSCRIPTION_ID) # make sure whitespace is removed AZURE_SUBSCRIPTION_ID="$(echo -e "${AZURE_SUBSCRIPTION_ID}" | tr -d '[:space:]')" export 
AZURE_SUBSCRIPTION_ID @@ -101,7 +101,7 @@ elif [ "$KITCHEN_PROVIDER" == "ec2" ]; then export KITCHEN_EC2_SSH_KEY_ID="datadog-agent-kitchen" export KITCHEN_EC2_SSH_KEY_PATH="$(pwd)/aws-ssh-key" touch $KITCHEN_EC2_SSH_KEY_PATH && chmod 600 $KITCHEN_EC2_SSH_KEY_PATH - $PARENT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_EC2_SSH_KEY_SSM_NAME > $KITCHEN_EC2_SSH_KEY_PATH + $PARENT_DIR/tools/ci/fetch_secret.sh $KITCHEN_EC2_SSH_KEY > $KITCHEN_EC2_SSH_KEY_PATH fi fi diff --git a/test/kitchen/tasks/show-strays.sh b/test/kitchen/tasks/show-strays.sh index ab12b9a5edd5f..996c95f6ac04b 100755 --- a/test/kitchen/tasks/show-strays.sh +++ b/test/kitchen/tasks/show-strays.sh @@ -10,19 +10,19 @@ set -euo pipefail # These should not be printed out set +x if [ -z ${AZURE_CLIENT_ID+x} ]; then - AZURE_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_ID_SSM_NAME) + AZURE_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_ID) export AZURE_CLIENT_ID fi if [ -z ${AZURE_CLIENT_SECRET+x} ]; then - AZURE_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_CLIENT_SECRET_SSM_NAME) + AZURE_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_SECRET) export AZURE_CLIENT_SECRET fi if [ -z ${AZURE_TENANT_ID+x} ]; then - AZURE_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_TENANT_ID_SSM_NAME) + AZURE_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_TENANT_ID) export AZURE_TENANT_ID fi if [ -z ${AZURE_SUBSCRIPTION_ID+x} ]; then - AZURE_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $KITCHEN_AZURE_SUBSCRIPTION_ID_SSM_NAME) + AZURE_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_SUBSCRIPTION_ID) export AZURE_SUBSCRIPTION_ID fi if [ -z ${DD_PIPELINE_ID+x} ]; then diff --git a/test/new-e2e/examples/gcp_vm_test.go b/test/new-e2e/examples/gcp_vm_test.go new file mode 100644 index 0000000000000..1f897a87bbfe3 --- /dev/null +++ b/test/new-e2e/examples/gcp_vm_test.go @@ -0,0 +1,32 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package examples + +import ( + gcphost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/gcp/host/linux" + "testing" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" +) + +type gcpVMSuite struct { + e2e.BaseSuite[environments.Host] +} + +// TestGCPVMSuite runs tests for the VM interface to ensure its implementation is correct. +func TestGCPVMSuite(t *testing.T) { + suiteParams := []e2e.SuiteOption{e2e.WithProvisioner(gcphost.ProvisionerNoAgentNoFakeIntake())} + e2e.Run(t, &gcpVMSuite{}, suiteParams...) +} + +func (v *gcpVMSuite) TestExecute() { + vm := v.Env().RemoteHost + + out, err := vm.Execute("whoami") + v.Require().NoError(err) + v.Require().NotEmpty(out) +} diff --git a/test/new-e2e/examples/gke_test.go b/test/new-e2e/examples/gke_test.go new file mode 100644 index 0000000000000..5f01481914652 --- /dev/null +++ b/test/new-e2e/examples/gke_test.go @@ -0,0 +1,48 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+package examples + +import ( + "context" + gcpkubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/gcp/kubernetes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "strings" + "testing" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" +) + +type gkeSuite struct { + e2e.BaseSuite[environments.Kubernetes] +} + +func TestGKESuite(t *testing.T) { + e2e.Run(t, &gkeSuite{}, e2e.WithProvisioner(gcpkubernetes.GKEProvisioner())) +} + +func (v *gkeSuite) TestGKE() { + v.T().Log("Running GKE test") + res, _ := v.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(context.TODO(), v1.ListOptions{}) + var clusterAgent corev1.Pod + containsClusterAgent := false + for _, pod := range res.Items { + if strings.Contains(pod.Name, "cluster-agent") { + containsClusterAgent = true + clusterAgent = pod + break + } + } + assert.True(v.T(), containsClusterAgent, "Cluster Agent not found") + + stdout, stderr, err := v.Env().KubernetesCluster.KubernetesClient. + PodExec("datadog", clusterAgent.Name, "cluster-agent", []string{"ls"}) + require.NoError(v.T(), err) + assert.Empty(v.T(), stderr) + assert.NotEmpty(v.T(), stdout) +} diff --git a/test/new-e2e/examples/vm_with_file_operations_test.go b/test/new-e2e/examples/vm_with_file_operations_test.go index 7c4a7e7fa54f7..02f8592388bd6 100644 --- a/test/new-e2e/examples/vm_with_file_operations_test.go +++ b/test/new-e2e/examples/vm_with_file_operations_test.go @@ -9,11 +9,16 @@ import ( "io/fs" "testing" + "golang.org/x/crypto/ssh" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" "github.com/DataDog/test-infra-definitions/components/os" "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" + + "github.com/stretchr/testify/assert" ) type vmSuiteWithFileOperations struct { @@ -22,10 +27,85 @@ type vmSuiteWithFileOperations struct { // TestVMSuiteWithFileOperations runs tests for the VM interface to ensure its implementation is correct. func TestVMSuiteWithFileOperations(t *testing.T) { - suiteParams := []e2e.SuiteOption{e2e.WithProvisioner(awshost.ProvisionerNoFakeIntake(awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault))))} + suiteParams := []e2e.SuiteOption{e2e.WithProvisioner(awshost.ProvisionerNoAgentNoFakeIntake(awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault))))} e2e.Run(t, &vmSuiteWithFileOperations{}, suiteParams...) 
 }
 
+func assertExitCodeEqual(t *testing.T, err error, expected int, msgAndArgs ...interface{}) {
+	t.Helper()
+	var exitErr *ssh.ExitError
+	assert.ErrorAs(t, err, &exitErr)
+	assert.Equal(t, expected, exitErr.ExitStatus(), msgAndArgs)
+}
+
+// TestCommandResults tests that commands return output or errors in the expected way
+func (v *vmSuiteWithFileOperations) TestCommandResults() {
+	vm := v.Env().RemoteHost
+
+	// successful command should return the output
+	out, err := vm.Execute("echo hello")
+	v.Assert().NoError(err)
+	v.Assert().Contains(out, "hello")
+
+	// invalid commands should return an error
+	_, err = vm.Execute("not-a-command")
+	v.Assert().Error(err, "invalid command should return an error")
+
+	// specific exit code should be returned
+	_, err = vm.Execute("exit 2")
+	v.Assert().Error(err, "non-zero exit code should return an error")
+	assertExitCodeEqual(v.T(), err, 2, "specific exit code should be returned")
+
+	if vm.OSFamily == os.WindowsFamily {
+		v.testWindowsCommandResults()
+	}
+}
+
+func (v *vmSuiteWithFileOperations) testWindowsCommandResults() {
+	vm := v.Env().RemoteHost
+
+	// invalid commands should return an error
+	_, err := vm.Execute("not-a-command")
+	v.Assert().Error(err, "invalid command should return an error")
+	assertExitCodeEqual(v.T(), err, 1, "generic PowerShell error should return exit code 1")
+
+	// native commands should return the exit status
+	_, err = vm.Execute("cmd.exe /c exit 2")
+	v.Assert().Error(err, "native command failure should return an error")
+	assertExitCodeEqual(v.T(), err, 2, "specific exit code should be returned")
+
+	// a failing native command should continue to execute the rest of the command
+	// and the result should be from the last command
+	out, err := vm.Execute("cmd.exe /c exit 2; echo hello")
+	v.Assert().NoError(err, "result should come from the last command")
+	v.Assert().Contains(out, "hello", "native command failure should continue to execute the rest of the command")
+
+	// Execute should auto-set $ErrorActionPreference to 'Stop', so
+	// a failing PowerShell cmdlet should fail immediately and not
+	// execute the rest of the command, so the output should not contain "hello"
+	out, err = vm.Execute(`Write-Error 'error'; echo he""llo`)
+	v.Assert().Error(err, "Execute should add ErrorActionPreference='Stop' to stop command execution on error")
+	v.Assert().NotContains(err.Error(), "hello")
+	v.Assert().NotContains(out, "hello")
+	assertExitCodeEqual(v.T(), err, 1, "failing PowerShell cmdlet should return exit code 1")
+
+	// Execute should auto-set $ErrorActionPreference to 'Stop', so subcommands return an error
+	_, err = vm.Execute(`(Get-Service -Name 'not-a-service').Status`)
+	v.Assert().Error(err, "Execute should add ErrorActionPreference='Stop' to stop subcommand execution on error")
+	assertExitCodeEqual(v.T(), err, 1, "failing PowerShell cmdlet should return exit code 1")
+	// Sanity check default 'Continue' behavior does not return an error
+	_, err = vm.Execute(`$ErrorActionPreference='Continue'; (Get-Service -Name 'not-a-service').Status`)
+	v.Assert().NoError(err, "explicit ErrorActionPreference='Continue' should ignore subcommand error")
+
+	// env vars should not leak between commands
+	_, err = vm.Execute(`$env:MYVAR1 = 'banana'`, client.WithEnvVariables(map[string]string{"MYVAR2": "orange"}))
+	v.Assert().NoError(err, "setting env vars should not return an error")
+	out, err = vm.Execute(`echo $env:MYVAR1; echo $env:MYVAR2`)
+	v.Assert().NoError(err)
+	v.Assert().NotContains(out, "banana", 
"env vars should not leak between commands") + v.Assert().NotContains(out, "orange", "env vars should not leak between commands") +} + func (v *vmSuiteWithFileOperations) TestFileOperations() { vm := v.Env().RemoteHost testFilePath := "test" diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index dd5a7b64ddc56..d294c91a18bc8 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -1,6 +1,8 @@ module github.com/DataDog/datadog-agent/test/new-e2e -go 1.22.0 +go 1.22.5 + +toolchain go1.22.6 // Do not upgrade Pulumi plugins to versions different from `test-infra-definitions`. // The plugin versions NEED to be aligned. @@ -9,6 +11,7 @@ go 1.22.0 replace ( github.com/DataDog/datadog-agent/comp/netflow/payload => ../../comp/netflow/payload github.com/DataDog/datadog-agent/pkg/proto => ../../pkg/proto + github.com/DataDog/datadog-agent/pkg/trace => ../../pkg/trace github.com/DataDog/datadog-agent/pkg/util/optional => ../../pkg/util/optional github.com/DataDog/datadog-agent/pkg/util/pointer => ../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../pkg/util/scrubber @@ -32,8 +35,8 @@ require ( // `TEST_INFRA_DEFINITIONS_BUILDIMAGES` matches the commit sha in the module version // Example: github.com/DataDog/test-infra-definitions v0.0.0-YYYYMMDDHHmmSS-0123456789AB // => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB - github.com/DataDog/test-infra-definitions v0.0.0-20240904143845-c9ee795ec752 - github.com/aws/aws-sdk-go-v2 v1.30.4 + github.com/DataDog/test-infra-definitions v0.0.0-20241002105811-0e2d625838fc + github.com/aws/aws-sdk-go-v2 v1.30.5 github.com/aws/aws-sdk-go-v2/config v1.27.19 github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2 github.com/aws/aws-sdk-go-v2/service/eks v1.44.1 @@ -50,9 +53,8 @@ require ( github.com/pulumi/pulumi-awsx/sdk/v2 v2.14.0 github.com/pulumi/pulumi-eks/sdk/v2 v2.7.8 github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.17.1 - github.com/pulumi/pulumi/sdk/v3 v3.130.0 + github.com/pulumi/pulumi/sdk/v3 v3.131.0 github.com/samber/lo v1.47.0 - github.com/sethvargo/go-retry v0.2.4 github.com/stretchr/testify v1.9.0 github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/crypto v0.26.0 @@ -88,11 +90,11 @@ require ( github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.19 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.6 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.12 // indirect - github.com/aws/aws-sdk-go-v2/service/ecr v1.32.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.32.4 // indirect github.com/aws/aws-sdk-go-v2/service/ecs v1.45.2 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.14 // indirect @@ -105,7 +107,7 @@ require ( github.com/aws/smithy-go v1.20.4 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/blang/semver v3.5.1+incompatible // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 github.com/chai2010/gettext-go v1.0.2 // indirect github.com/charmbracelet/bubbles v0.18.0 // indirect 
github.com/charmbracelet/bubbletea v0.25.0 // indirect @@ -217,7 +219,6 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/texttheater/golang-levenshtein v1.0.1 // indirect github.com/tinylib/msgp v1.1.8 // indirect - github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect @@ -263,12 +264,19 @@ require ( sigs.k8s.io/yaml v1.3.0 // indirect ) -require github.com/hairyhenderson/go-codeowners v0.5.0 +require ( + github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 + github.com/DataDog/datadog-go/v5 v5.5.0 + github.com/digitalocean/go-libvirt v0.0.0-20240812180835-9c6c0a310c6c + github.com/hairyhenderson/go-codeowners v0.5.0 +) require ( - github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.57.0 // indirect + github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.60.0 // indirect github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0 // indirect github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.59.0 // indirect - github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.58.0 // indirect - github.com/pulumi/pulumi-azure-native-sdk/v2 v2.59.0 // indirect + github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.59.0 // indirect + github.com/pulumi/pulumi-azure-native-sdk/v2 v2.60.0 // indirect + github.com/pulumi/pulumi-gcp/sdk/v6 v6.67.1 // indirect + github.com/pulumi/pulumi-gcp/sdk/v7 v7.38.0 // indirect ) diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index b8993893b924a..49d85c839031a 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -12,10 +12,12 @@ github.com/DataDog/datadog-api-client-go v1.16.0 h1:5jOZv1m98criCvYTa3qpW8Hzv301 github.com/DataDog/datadog-api-client-go v1.16.0/go.mod h1:PgrP2ABuJWL3Auw2iEkemAJ/r72ghG4DQQmb5sgnKW4= github.com/DataDog/datadog-api-client-go/v2 v2.27.0 h1:AGZj41frjnjMufQHQbJH2fzmifOs20wpmVDtIBCv33E= github.com/DataDog/datadog-api-client-go/v2 v2.27.0/go.mod h1:QKOu6vscsh87fMY1lHfLEmNSunyXImj8BUaUWJXOehc= +github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= +github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a h1:m9REhmyaWD5YJ0P53ygRHxKKo+KM+nw+zz0hEdKztMo= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/test-infra-definitions v0.0.0-20240904143845-c9ee795ec752 h1:J+KnqV0jYvXvcN1LzRiRxYJo/nHGYsnBQ22VXIdAJD8= -github.com/DataDog/test-infra-definitions v0.0.0-20240904143845-c9ee795ec752/go.mod h1:QEQPOdzBcxZly/1KtAPFgF1R7Tp98FajB06gZ75E+/U= +github.com/DataDog/test-infra-definitions v0.0.0-20241002105811-0e2d625838fc h1:PtRMzcoFwEcxSAvqguHKiLDQshWfKwtFiKYYE3Z5Hsw= +github.com/DataDog/test-infra-definitions v0.0.0-20241002105811-0e2d625838fc/go.mod h1:nbSZa2alOY/vhOJRJNhMAbXNLARLT83ozjoP+s1hyRE= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= @@ -26,6 +28,7 @@ github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/semver 
v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= @@ -45,8 +48,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= -github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8= -github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= +github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g= +github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= github.com/aws/aws-sdk-go-v2/config v1.27.19 h1:+DBS8gJP6VsxYkZ6UEV0/VsRM2rYpbQCYsosW9RRmeQ= @@ -55,18 +58,18 @@ github.com/aws/aws-sdk-go-v2/credentials v1.17.19 h1:R18G7nBBGLby51CFEqUBFF2IVl7 github.com/aws/aws-sdk-go-v2/credentials v1.17.19/go.mod h1:xr9kUMnaLTB866HItT6pg58JgiBP77fSQLBwIa//zk8= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.6 h1:vVOuhRyslJ6T/HteG71ZWCTas1q2w6f0NKsNbkXHs/A= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.6/go.mod h1:jimWaqLiT0sJGLh51dKCLLtExRYPtMU7MpxuCgtbkxg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 h1:jYfy8UPmd+6kJW5YhY0L1/KftReOGxI/4NtVSTh9O/I= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.12 h1:DXFWyt7ymx/l1ygdyTTS0X923e+Q2wXIxConJzrgwc0= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.12/go.mod h1:mVOr/LbvaNySK1/BTy4cBOCjhCNY2raWBwK4v+WR5J4= github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2 h1:Rts0EZgdi3tneJMXp+uKrZHbMxQIu0y5O/2MG6a2+hY= github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2/go.mod h1:j0V2ahvdX3mGIyXQSe9vjdIQvSxz3uaMM0bR7Y+0WCE= -github.com/aws/aws-sdk-go-v2/service/ecr v1.32.1 
h1:PxM8EHsv1sd9eWGamMQCvqBEjxytK5kAwjrxlfG3tac= -github.com/aws/aws-sdk-go-v2/service/ecr v1.32.1/go.mod h1:kdk+WJbHcGVbIlRQfSrKyuKkbWDdD8I9NScyS5vZ8eQ= +github.com/aws/aws-sdk-go-v2/service/ecr v1.32.4 h1:nQAU2Yr+afkAvIV39mg7LrNYFNQP7ShwbmiJqx2fUKA= +github.com/aws/aws-sdk-go-v2/service/ecr v1.32.4/go.mod h1:keOS9j4fv5ASh7dV29lIpGw2QgoJwGFAyMU0uPvfax4= github.com/aws/aws-sdk-go-v2/service/ecs v1.45.2 h1:DSFxt4HBQjlgKNMyYdME9cbB11FFi7umpTGbqJaS9nw= github.com/aws/aws-sdk-go-v2/service/ecs v1.45.2/go.mod h1:er8WHbgZAl17Dmu41ifKmUrV7JPpiQnRc+XSrnu4qR8= github.com/aws/aws-sdk-go-v2/service/eks v1.44.1 h1:onUAzZXDsyXzyrmOGw/9p8Csl1NZkTDEs4URZ8covUY= @@ -105,8 +108,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/charmbracelet/bubbles v0.18.0 h1:PYv1A036luoBGroX6VWjQIE9Syf2Wby2oOl/39KLfy0= @@ -137,6 +140,8 @@ github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxG github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/digitalocean/go-libvirt v0.0.0-20240812180835-9c6c0a310c6c h1:1y+eZhZOMDP86ErYQ7P7ebAvyhpr+HZhR5K6BlOkWoo= +github.com/digitalocean/go-libvirt v0.0.0-20240812180835-9c6c0a310c6c/go.mod h1:vhj0tZhS07ugaMVppAreQmBVHcqLwl5YR2DRu5/uJbY= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= @@ -211,6 +216,7 @@ github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwm github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= @@ -317,8 +323,6 @@ github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.15 
h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= @@ -384,15 +388,15 @@ github.com/pkg/term v1.1.0 h1:xIAAdCMh3QIAy+5FrE8Ad8XoDhEU4ufwbaSozViP9kk= github.com/pkg/term v1.1.0/go.mod h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiKZ9jpNGw= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= +github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= +github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek= +github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk= github.com/pulumi/appdash v0.0.0-20231130102222-75f619a67231 h1:vkHw5I/plNdTr435cARxCW6q9gc0S/Yxz7Mkd38pOb0= github.com/pulumi/appdash v0.0.0-20231130102222-75f619a67231/go.mod h1:murToZ2N9hNJzewjHBgfFdXhZKjY3z5cYC1VXk+lbFE= github.com/pulumi/esc v0.9.1 h1:HH5eEv8sgyxSpY5a8yePyqFXzA8cvBvapfH8457+mIs= @@ -401,22 +405,26 @@ github.com/pulumi/pulumi-aws/sdk/v6 v6.47.0 h1:DEbHd7krLB3p3Qr4PlAaEScA5mQR85jif github.com/pulumi/pulumi-aws/sdk/v6 v6.47.0/go.mod h1:gN/y6Gl/c6R2m1H0DlpyeyxpemtLJNhgHWcYz+vBPdo= github.com/pulumi/pulumi-awsx/sdk/v2 v2.14.0 h1:GknlrxIweg8X65VcxJaUVdZIHhclZjdzEWxsLGnMR2Y= github.com/pulumi/pulumi-awsx/sdk/v2 v2.14.0/go.mod h1:mB6jxy6GjMd1dmTA129GkHH5pyryYG/W0J1X2XznxW4= -github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.57.0 h1:0QxN2F/yiylylNjYMqqXc5RQoKan/Pq/x1v43QaxE/c= -github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.57.0/go.mod 
h1:pv7oEJtA6Tn8dnE8/xya/yCQd6GU0Br9c9nHRkW9LiQ= +github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.60.0 h1:qCpKZQECnZWXVMWfuTk6nfPfQoP+7zXPS5bHdeIh5Mc= +github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.60.0/go.mod h1:ILyyA8nuYMWOcU7sRqRVmakNeY4hxog7K4nMCL+IOjE= github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0 h1:MFOd6X9FPlixzriy14fBHv7pFCCh/mu1pwHtSSjqfJ4= github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0/go.mod h1:453Ff5wNscroYfq+zxME7Nbt7HdZv+dh0zLZwLyGBws= github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.59.0 h1:ijcCyi+SPlJn3aIEb4p23FTk6fxjPLtVMhfkRaKp85A= github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.59.0/go.mod h1:yQXpYXNeGVBcygd5Be/fzf+1Jcg4kDLAMZY6UDtIZvQ= -github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.58.0 h1:joRRPeYxXSaCGF7we0NNAMsOy7HJFd7O4cWAjmKveRI= -github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.58.0/go.mod h1:XFiuqPmtOASRrKU1q29sgzAuq9OcZ0bDzdBw9TSUyyo= -github.com/pulumi/pulumi-azure-native-sdk/v2 v2.59.0 h1:1S0sh1N+9MV2eUOidjS1LA01eZ6x6j+I7G8CE0RBb8E= -github.com/pulumi/pulumi-azure-native-sdk/v2 v2.59.0/go.mod h1:yVyaGAI0YLEcqfP/8Bmk9VgtRxE5JsBayO9i5QNneWg= +github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.59.0 h1:mqs2dlpcyYn2LsA20bC8xN30YaVs7x8M6tC7BtDiY64= +github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.59.0/go.mod h1:OTv2GUMWUktcvdjXFRaAdJDW1f/SuRSCKHdVCcQrN7U= +github.com/pulumi/pulumi-azure-native-sdk/v2 v2.60.0 h1:Q+we+HFtnNGkeXIhdWIKJZWJRwaIBUuMcZKG70YIYyw= +github.com/pulumi/pulumi-azure-native-sdk/v2 v2.60.0/go.mod h1:guTN5l9syK6v4+94APSi9np3rj1JPrPUEOG+B0dDaZE= github.com/pulumi/pulumi-command/sdk v1.0.1 h1:ZuBSFT57nxg/fs8yBymUhKLkjJ6qmyN3gNvlY/idiN0= github.com/pulumi/pulumi-command/sdk v1.0.1/go.mod h1:C7sfdFbUIoXKoIASfXUbP/U9xnwPfxvz8dBpFodohlA= github.com/pulumi/pulumi-docker/sdk/v4 v4.5.5 h1:7OjAfgLz5PAy95ynbgPAlWls5WBe4I/QW/61TdPWRlQ= github.com/pulumi/pulumi-docker/sdk/v4 v4.5.5/go.mod h1:XZKLFXbw13olxuztlWnmVUPYZp2a+BqzqhuMl0j/Ow8= github.com/pulumi/pulumi-eks/sdk/v2 v2.7.8 h1:NeCKFxyOLpAaG4pJDk7+ewnCuV2IbXR7PggYSNujOno= github.com/pulumi/pulumi-eks/sdk/v2 v2.7.8/go.mod h1:ARGNnIZENIpDUVSX21JEQJKrESj/0u0r0iT61rpb86I= +github.com/pulumi/pulumi-gcp/sdk/v6 v6.67.1 h1:PUH/sUbJmBmHjNFNthJ/dW2+riFuJV0FhrGAwuUuRIg= +github.com/pulumi/pulumi-gcp/sdk/v6 v6.67.1/go.mod h1:OmZeji3dNMwB1qldAlaQfcfJPc2BaZyweVGH7Ej4SJg= +github.com/pulumi/pulumi-gcp/sdk/v7 v7.38.0 h1:21oSj+TKlKTzQcxN9Hik7iSNNHPUQXN4s3itOnahy/w= +github.com/pulumi/pulumi-gcp/sdk/v7 v7.38.0/go.mod h1:YaEZms1NgXFqGhObKVofcAeWXu2V+3t/BAXdHQZq7fU= github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.17.1 h1:VDX+hu+qK3fbf2FodgG5kfh2h1bHK0FKirW1YqKWkRc= github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.17.1/go.mod h1:e69ohZtUePLLYNLXYgiOWp0FvRGg6ya/3fsq3o00nN0= github.com/pulumi/pulumi-libvirt/sdk v0.4.7 h1:/BBnqqx/Gbg2vINvJxXIVb58THXzw2lSqFqxlRSXH9M= @@ -425,8 +433,8 @@ github.com/pulumi/pulumi-random/sdk/v4 v4.16.3 h1:nlN42MRSIuDh5Pc5nLq4b0lwZaX2ZU github.com/pulumi/pulumi-random/sdk/v4 v4.16.3/go.mod h1:yRfWJSLEAVZvkwgXajr3S9OmFkAZTxfO44Ef2HfixXQ= github.com/pulumi/pulumi-tls/sdk/v4 v4.11.1 h1:tXemWrzeVTqG8zq6hBdv1TdPFXjgZ+dob63a/6GlF1o= github.com/pulumi/pulumi-tls/sdk/v4 v4.11.1/go.mod h1:hODo3iEmmXDFOXqPK+V+vwI0a3Ww7BLjs5Tgamp86Ng= -github.com/pulumi/pulumi/sdk/v3 v3.130.0 h1:gGJNd+akPqhZ+vrsZmAjSNJn6kGJkitjjkwrmIQMmn8= -github.com/pulumi/pulumi/sdk/v3 v3.130.0/go.mod h1:p1U24en3zt51agx+WlNboSOV8eLlPWYAkxMzVEXKbnY= +github.com/pulumi/pulumi/sdk/v3 v3.131.0 
h1:w6+XFt4ajz7ZEoCBFo+oMmrQ4DYYBKtzuj/zBe/uyoo= +github.com/pulumi/pulumi/sdk/v3 v3.131.0/go.mod h1:J5kQEX8v87aeUhk6NdQXnjCo1DbiOnOiL3Sf2DuDda8= github.com/pulumiverse/pulumi-time/sdk v0.0.17 h1:JNYVLglXeMAjyD3upIwKZ9o7MnNo7kc3FVsgxs7bc+A= github.com/pulumiverse/pulumi-time/sdk v0.0.17/go.mod h1:NUa1zA74DF002WrM6iF111A6UjX9knPpXufVRvBwNyg= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= @@ -448,8 +456,6 @@ github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6Ng github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= -github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec= -github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -481,16 +487,14 @@ github.com/texttheater/golang-levenshtein v1.0.1 h1:+cRNoVrfiwufQPhoMzB6N0Yf/Mqa github.com/texttheater/golang-levenshtein v1.0.1/go.mod h1:PYAKrbF5sAiq9wd+H82hs7gNaen0CplQ9uvm6+enD/8= github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= -github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7 h1:X9dsIWPuuEJlPX//UmRKophhOKCGXc46RVIGuttks68= -github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7/go.mod h1:UxoP3EypF8JfGEjAII8jx1q8rQyDnX8qdTCs/UQBVIE= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= +github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= @@ -503,6 +507,7 @@ github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= @@ -548,6 +553,7 @@ golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPI golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -563,6 +569,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= @@ -580,6 +587,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= @@ -595,7 +603,9 @@ golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -644,6 +654,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= diff --git a/test/new-e2e/pkg/components/datadog-installer/component.go b/test/new-e2e/pkg/components/datadog-installer/component.go index bcd5a936b8e64..f8ec377f93401 100644 --- a/test/new-e2e/pkg/components/datadog-installer/component.go +++ b/test/new-e2e/pkg/components/datadog-installer/component.go @@ -43,7 +43,8 @@ func (h *Component) Export(ctx *pulumi.Context, out *Output) error { // Configuration represents the Windows NewDefender configuration type Configuration struct { - URL string + URL string + AgentUser string } // Option is an optional function parameter type for Configuration options @@ -57,6 +58,14 @@ func WithInstallURL(url string) func(*Configuration) error { } } +// WithAgentUser specifies the ddagentuser for the installation +func WithAgentUser(user string) func(*Configuration) error { + return func(p *Configuration) error { + p.AgentUser = user + return nil + } +} + // NewConfig creates a default config func NewConfig(env config.Env, options ...Option) (*Configuration, error) { if env.PipelineID() != "" { @@ -79,14 +88,19 @@ func NewInstaller(e config.Env, host *remoteComp.Host, options ...Option) (*Comp return nil, err } + agentUserArg := "" + if params.AgentUser != "" { + agentUserArg = "DDAGENTUSER_NAME=" + params.AgentUser + } + hostInstaller, err := components.NewComponent(e, e.CommonNamer().ResourceName("datadog-installer"), func(comp *Component) error { comp.namer = e.CommonNamer().WithPrefix("datadog-installer") comp.Host = host _, err = host.OS.Runner().Command(comp.namer.ResourceName("install"), &command.Args{ Create: pulumi.Sprintf(` -Exit (Start-Process -Wait msiexec -PassThru -ArgumentList '/qn /i %s').ExitCode -`, params.URL), +Exit (Start-Process -Wait msiexec -PassThru -ArgumentList '/qn /i %s %s').ExitCode +`, params.URL, agentUserArg), Delete: pulumi.Sprintf(` $installerList = Get-ItemProperty "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\*" | Where-Object {$_.DisplayName -like 'Datadog Installer'} if (($installerList | measure).Count -ne 1) { diff --git a/test/new-e2e/pkg/environments/aws/host/host.go b/test/new-e2e/pkg/environments/aws/host/host.go index 7652d9b846576..4668905ac83a8 100644 --- a/test/new-e2e/pkg/environments/aws/host/host.go +++ b/test/new-e2e/pkg/environments/aws/host/host.go @@ -261,7 +261,8 @@ func Run(ctx *pulumi.Context, env *environments.Host, runParams RunParams) error // todo: add agent once updater installs agent on bootstrap env.Agent = nil } else if params.agentOptions != nil { - agent, err := agent.NewHostAgent(&awsEnv, host, params.agentOptions...) 
+ agentOptions := append(params.agentOptions, agentparams.WithTags([]string{fmt.Sprintf("stackid:%s", ctx.Stack())})) + agent, err := agent.NewHostAgent(&awsEnv, host, agentOptions...) if err != nil { return err } diff --git a/test/new-e2e/pkg/environments/aws/host/windows/host.go b/test/new-e2e/pkg/environments/aws/host/windows/host.go index 7fcb44f05da53..1fd5885a88c4d 100644 --- a/test/new-e2e/pkg/environments/aws/host/windows/host.go +++ b/test/new-e2e/pkg/environments/aws/host/windows/host.go @@ -8,7 +8,7 @@ package winawshost import ( "fmt" - installer "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components/datadog-installer" + "github.com/DataDog/test-infra-definitions/components/activedirectory" "github.com/DataDog/test-infra-definitions/components/datadog/agent" "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" @@ -18,6 +18,8 @@ import ( "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" + installer "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components/datadog-installer" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams" @@ -202,7 +204,8 @@ func Run(ctx *pulumi.Context, env *environments.WindowsHost, params *Provisioner } if params.agentOptions != nil { - agent, err := agent.NewHostAgent(&awsEnv, host, params.agentOptions...) + agentOptions := append(params.agentOptions, agentparams.WithTags([]string{fmt.Sprintf("stackid:%s", ctx.Stack())})) + agent, err := agent.NewHostAgent(&awsEnv, host, agentOptions...) if err != nil { return err } diff --git a/test/new-e2e/pkg/environments/gcp/host/linux/host.go b/test/new-e2e/pkg/environments/gcp/host/linux/host.go new file mode 100644 index 0000000000000..0e479d8a51bdf --- /dev/null +++ b/test/new-e2e/pkg/environments/gcp/host/linux/host.go @@ -0,0 +1,124 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package gcphost contains the definition of the GCP Host environment. +package gcphost + +import ( + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/test-infra-definitions/resources/gcp" + "github.com/DataDog/test-infra-definitions/scenarios/gcp/compute" + "github.com/DataDog/test-infra-definitions/scenarios/gcp/fakeintake" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + + "github.com/DataDog/test-infra-definitions/components/datadog/agent" + "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" + "github.com/DataDog/test-infra-definitions/components/datadog/updater" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +const ( + provisionerBaseID = "gcp-vm-" + defaultVMName = "vm" +) + +// Provisioner creates a VM environment with an VM, a FakeIntake and a Host Agent configured to talk to each other. +// FakeIntake and Agent creation can be deactivated by using [WithoutFakeIntake] and [WithoutAgent] options. +func Provisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Host] { + // We need to build params here to be able to use params.name in the provisioner name + params := GetProvisionerParams(opts...) 
+ + provisioner := e2e.NewTypedPulumiProvisioner(provisionerBaseID+params.name, func(ctx *pulumi.Context, env *environments.Host) error { + // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times. + // and it's easy to forget about it, leading to hard-to-debug issues. + params := GetProvisionerParams(opts...) + return Run(ctx, env, RunParams{ProvisionerParams: params}) + }, params.extraConfigParams) + + return provisioner +} + +// Run deploys an environment given a pulumi.Context +func Run(ctx *pulumi.Context, env *environments.Host, runParams RunParams) error { + var gcpEnv gcp.Environment + if runParams.Environment == nil { + var err error + gcpEnv, err = gcp.NewEnvironment(ctx) + if err != nil { + return err + } + } else { + gcpEnv = *runParams.Environment + } + params := runParams.ProvisionerParams + + host, err := compute.NewVM(gcpEnv, params.name, params.instanceOptions...) + if err != nil { + return err + } + err = host.Export(ctx, &env.RemoteHost.HostOutput) + if err != nil { + return err + } + + // Create FakeIntake if required + if params.fakeintakeOptions != nil { + fakeIntake, err := fakeintake.NewVMInstance(gcpEnv, params.fakeintakeOptions...) + if err != nil { + return err + } + err = fakeIntake.Export(ctx, &env.FakeIntake.FakeintakeOutput) + if err != nil { + return err + } + + // Normally if FakeIntake is enabled, Agent is enabled, but just in case + if params.agentOptions != nil { + // Prepend in case it's overridden by the user + newOpts := []agentparams.Option{agentparams.WithFakeintake(fakeIntake)} + params.agentOptions = append(newOpts, params.agentOptions...) + } + } else { + // Suite inits all fields by default, so we need to explicitly set it to nil + env.FakeIntake = nil + } + if !params.installUpdater { + // Suite inits all fields by default, so we need to explicitly set it to nil + env.Updater = nil + } + + // Create Agent if required + if params.installUpdater && params.agentOptions != nil { + updater, err := updater.NewHostUpdater(&gcpEnv, host, params.agentOptions...) + if err != nil { + return err + } + + err = updater.Export(ctx, &env.Updater.HostUpdaterOutput) + if err != nil { + return err + } + // todo: add agent once updater installs agent on bootstrap + env.Agent = nil + } else if params.agentOptions != nil { + agent, err := agent.NewHostAgent(&gcpEnv, host, params.agentOptions...) + if err != nil { + return err + } + + err = agent.Export(ctx, &env.Agent.HostAgentOutput) + if err != nil { + return err + } + + env.Agent.ClientOptions = params.agentClientOptions + } else { + // Suite inits all fields by default, so we need to explicitly set it to nil + env.Agent = nil + } + + return nil +} diff --git a/test/new-e2e/pkg/environments/gcp/host/linux/params.go b/test/new-e2e/pkg/environments/gcp/host/linux/params.go new file mode 100644 index 0000000000000..442fd28b889b0 --- /dev/null +++ b/test/new-e2e/pkg/environments/gcp/host/linux/params.go @@ -0,0 +1,152 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
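For readers unfamiliar with the new-e2e framework, the sketch below shows how a test suite could opt into the GCP host provisioner introduced just above. It is illustrative only and not part of this patch: the package, suite name, and assertions are hypothetical, and e2e.Run, e2e.BaseSuite, e2e.WithProvisioner, and agentparams.WithAgentConfig are assumed to behave as they do for the existing AWS and Azure provisioners.

```go
// Hypothetical example suite; not part of this change.
package gcphostexample

import (
	"testing"

	"github.com/DataDog/test-infra-definitions/components/datadog/agentparams"

	"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
	"github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
	gcphost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/gcp/host/linux"
)

type gcpVMSuite struct {
	e2e.BaseSuite[environments.Host]
}

// TestGCPVMSuite provisions a GCP VM, a FakeIntake and a host Agent, then runs the suite.
func TestGCPVMSuite(t *testing.T) {
	e2e.Run(t, &gcpVMSuite{}, e2e.WithProvisioner(
		gcphost.Provisioner(
			gcphost.WithName("gcp-vm-example"),
			gcphost.WithAgentOptions(agentparams.WithAgentConfig("log_level: debug")),
		),
	))
}

// TestEnvironmentIsProvisioned only checks that the expected components were exported by Run.
func (s *gcpVMSuite) TestEnvironmentIsProvisioned() {
	s.Require().NotNil(s.Env().RemoteHost)
	s.Require().NotNil(s.Env().Agent)
	s.Require().NotNil(s.Env().FakeIntake)
}
```

Passing no options keeps the defaults from newProvisionerParams (VM named "vm", Agent and FakeIntake enabled); WithoutAgent and WithoutFakeIntake flip the corresponding fields back to nil so Run skips them.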
+
+package gcphost
+
+import (
+	"fmt"
+	"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+	"github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
+	"github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
+	"github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams"
+	"github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
+	"github.com/DataDog/test-infra-definitions/components/datadog/agentparams"
+	"github.com/DataDog/test-infra-definitions/resources/gcp"
+	"github.com/DataDog/test-infra-definitions/scenarios/gcp/compute"
+	"github.com/DataDog/test-infra-definitions/scenarios/gcp/fakeintake"
+)
+
+// ProvisionerParams is a set of parameters for the Provisioner.
+type ProvisionerParams struct {
+	name string
+
+	instanceOptions    []compute.VMOption
+	agentOptions       []agentparams.Option
+	agentClientOptions []agentclientparams.Option
+	fakeintakeOptions  []fakeintake.Option
+	extraConfigParams  runner.ConfigMap
+	installUpdater     bool
+}
+
+func newProvisionerParams() *ProvisionerParams {
+	// We use nil arrays to decide if we should create or not
+	return &ProvisionerParams{
+		name:               defaultVMName,
+		instanceOptions:    []compute.VMOption{},
+		agentOptions:       []agentparams.Option{},
+		agentClientOptions: []agentclientparams.Option{},
+		fakeintakeOptions:  []fakeintake.Option{},
+		extraConfigParams:  runner.ConfigMap{},
+	}
+}
+
+// GetProvisionerParams returns the ProvisionerParams built from the given options.
+func GetProvisionerParams(opts ...ProvisionerOption) *ProvisionerParams {
+	params := newProvisionerParams()
+	err := optional.ApplyOptions(params, opts)
+	if err != nil {
+		panic(fmt.Errorf("unable to apply ProvisionerOption, err: %w", err))
+	}
+	return params
+}
+
+// ProvisionerOption is a provisioner option.
+type ProvisionerOption func(*ProvisionerParams) error
+
+// WithName sets the name of the provisioner.
+func WithName(name string) ProvisionerOption {
+	return func(params *ProvisionerParams) error {
+		params.name = name
+		return nil
+	}
+}
+
+// WithInstanceOptions adds options to the GCP VM.
+func WithInstanceOptions(opts ...compute.VMOption) ProvisionerOption {
+	return func(params *ProvisionerParams) error {
+		params.instanceOptions = append(params.instanceOptions, opts...)
+		return nil
+	}
+}
+
+// WithAgentOptions adds options to the Agent.
+func WithAgentOptions(opts ...agentparams.Option) ProvisionerOption {
+	return func(params *ProvisionerParams) error {
+		params.agentOptions = append(params.agentOptions, opts...)
+		return nil
+	}
+}
+
+// WithAgentClientOptions adds options to the Agent client.
+func WithAgentClientOptions(opts ...agentclientparams.Option) ProvisionerOption {
+	return func(params *ProvisionerParams) error {
+		params.agentClientOptions = append(params.agentClientOptions, opts...)
+		return nil
+	}
+}
+
+// WithFakeIntakeOptions adds options to the FakeIntake.
+func WithFakeIntakeOptions(opts ...fakeintake.Option) ProvisionerOption {
+	return func(params *ProvisionerParams) error {
+		params.fakeintakeOptions = append(params.fakeintakeOptions, opts...)
+		return nil
+	}
+}
+
+// WithExtraConfigParams adds extra config parameters to the ConfigMap.
+func WithExtraConfigParams(configMap runner.ConfigMap) ProvisionerOption {
+	return func(params *ProvisionerParams) error {
+		params.extraConfigParams = configMap
+		return nil
+	}
+}
+
+// WithoutFakeIntake disables the creation of the FakeIntake.
+func WithoutFakeIntake() ProvisionerOption { + return func(params *ProvisionerParams) error { + params.fakeintakeOptions = nil + return nil + } +} + +// WithoutAgent disables the creation of the Agent. +func WithoutAgent() ProvisionerOption { + return func(params *ProvisionerParams) error { + params.agentOptions = nil + return nil + } +} + +// WithUpdater installs the agent through the updater. +func WithUpdater() ProvisionerOption { + return func(params *ProvisionerParams) error { + params.installUpdater = true + return nil + } +} + +// ProvisionerNoAgentNoFakeIntake wraps Provisioner with hardcoded WithoutAgent and WithoutFakeIntake options. +func ProvisionerNoAgentNoFakeIntake(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Host] { + mergedOpts := make([]ProvisionerOption, 0, len(opts)+2) + mergedOpts = append(mergedOpts, opts...) + mergedOpts = append(mergedOpts, WithoutAgent(), WithoutFakeIntake()) + + return Provisioner(mergedOpts...) +} + +// ProvisionerNoFakeIntake wraps Provisioner with hardcoded WithoutFakeIntake option. +func ProvisionerNoFakeIntake(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Host] { + mergedOpts := make([]ProvisionerOption, 0, len(opts)+1) + mergedOpts = append(mergedOpts, opts...) + mergedOpts = append(mergedOpts, WithoutFakeIntake()) + + return Provisioner(mergedOpts...) +} + +// RunParams is a set of parameters for the Run function. +type RunParams struct { + Environment *gcp.Environment + ProvisionerParams *ProvisionerParams +} diff --git a/test/new-e2e/pkg/environments/gcp/kubernetes/gke.go b/test/new-e2e/pkg/environments/gcp/kubernetes/gke.go new file mode 100644 index 0000000000000..10c315fb7b254 --- /dev/null +++ b/test/new-e2e/pkg/environments/gcp/kubernetes/gke.go @@ -0,0 +1,94 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package gcpkubernetes contains the provisioner for Google Kubernetes Engine (GKE) +package gcpkubernetes + +import ( + "github.com/DataDog/test-infra-definitions/resources/gcp" + "github.com/DataDog/test-infra-definitions/scenarios/gcp/gke" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" + + "github.com/DataDog/test-infra-definitions/components/datadog/agent/helm" + "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams" + "github.com/DataDog/test-infra-definitions/scenarios/gcp/fakeintake" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional" +) + +const ( + provisionerBaseID = "gcp-gke" +) + +// GKEProvisioner creates a new provisioner for GKE on GCP +func GKEProvisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Kubernetes] { + // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times. + // and it's easy to forget about it, leading to hard to debug issues. + params := newProvisionerParams() + _ = optional.ApplyOptions(params, opts) + + provisioner := e2e.NewTypedPulumiProvisioner(provisionerBaseID+params.name, func(ctx *pulumi.Context, env *environments.Kubernetes) error { + // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times. + // and it's easy to forget about it, leading to hard to debug issues. 
+ params := newProvisionerParams() + _ = optional.ApplyOptions(params, opts) + + return GKERunFunc(ctx, env, params) + }, params.extraConfigParams) + + return provisioner +} + +// GKERunFunc is the run function for GKE provisioner +func GKERunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *ProvisionerParams) error { + gcpEnv, err := gcp.NewEnvironment(ctx) + if err != nil { + return err + } + + // Create the cluster + cluster, err := gke.NewGKECluster(gcpEnv, params.gkeOptions...) + if err != nil { + return err + } + err = cluster.Export(ctx, &env.KubernetesCluster.ClusterOutput) + if err != nil { + return err + } + + agentOptions := params.agentOptions + + // Deploy a fakeintake + if params.fakeintakeOptions != nil { + fakeIntake, err := fakeintake.NewVMInstance(gcpEnv, params.fakeintakeOptions...) + if err != nil { + return err + } + err = fakeIntake.Export(ctx, &env.FakeIntake.FakeintakeOutput) + if err != nil { + return err + } + agentOptions = append(agentOptions, kubernetesagentparams.WithFakeintake(fakeIntake)) + + } else { + env.FakeIntake = nil + } + + if params.agentOptions != nil { + agent, err := helm.NewKubernetesAgent(&gcpEnv, params.name, cluster.KubeProvider, agentOptions...) + if err != nil { + return err + } + err = agent.Export(ctx, &env.Agent.KubernetesAgentOutput) + if err != nil { + return err + } + } else { + env.Agent = nil + } + return nil +} diff --git a/test/new-e2e/pkg/environments/gcp/kubernetes/params.go b/test/new-e2e/pkg/environments/gcp/kubernetes/params.go new file mode 100644 index 0000000000000..d42a5dac75f9e --- /dev/null +++ b/test/new-e2e/pkg/environments/gcp/kubernetes/params.go @@ -0,0 +1,100 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
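Similarly, a Kubernetes suite could consume the GKE provisioner defined above. This is a non-authoritative sketch under the same assumptions as the previous example (the package and suite names are invented; only GKEProvisioner and WithName come from this change).

```go
// Hypothetical example suite; not part of this change.
package gkeexample

import (
	"testing"

	"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
	"github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
	gcpkubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/gcp/kubernetes"
)

type gkeSuite struct {
	e2e.BaseSuite[environments.Kubernetes]
}

// TestGKESuite provisions a GKE cluster, a FakeIntake and a Helm-deployed Agent, then runs the suite.
func TestGKESuite(t *testing.T) {
	e2e.Run(t, &gkeSuite{}, e2e.WithProvisioner(
		gcpkubernetes.GKEProvisioner(
			gcpkubernetes.WithName("gke-example"),
		),
	))
}

// TestComponentsAreExported only checks that the cluster and Agent components were exported by GKERunFunc.
func (s *gkeSuite) TestComponentsAreExported() {
	s.Require().NotNil(s.Env().KubernetesCluster)
	s.Require().NotNil(s.Env().Agent)
}
```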
+ +// Package gcpkubernetes contains the provisioner for Google Kubernetes Engine (GKE) +package gcpkubernetes + +import ( + "fmt" + "github.com/DataDog/test-infra-definitions/scenarios/gcp/gke" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional" + + "github.com/DataDog/test-infra-definitions/common/config" + "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams" + kubeComp "github.com/DataDog/test-infra-definitions/components/kubernetes" + "github.com/DataDog/test-infra-definitions/scenarios/gcp/fakeintake" + + "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes" +) + +// ProvisionerParams contains all the parameters needed to create the environment +type ProvisionerParams struct { + name string + fakeintakeOptions []fakeintake.Option + agentOptions []kubernetesagentparams.Option + gkeOptions []gke.Option + workloadAppFuncs []WorkloadAppFunc + extraConfigParams runner.ConfigMap +} + +func newProvisionerParams(opts ...ProvisionerOption) *ProvisionerParams { + params := &ProvisionerParams{ + name: "gke", + fakeintakeOptions: []fakeintake.Option{}, + agentOptions: []kubernetesagentparams.Option{}, + workloadAppFuncs: []WorkloadAppFunc{}, + } + err := optional.ApplyOptions(params, opts) + if err != nil { + panic(fmt.Sprintf("failed to apply options: %v", err)) + } + return params +} + +// ProvisionerOption is a function that modifies the ProvisionerParams +type ProvisionerOption func(*ProvisionerParams) error + +// WithName sets the name of the provisioner +func WithName(name string) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.name = name + return nil + } +} + +// WithAgentOptions adds options to the agent +func WithAgentOptions(opts ...kubernetesagentparams.Option) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.agentOptions = opts + return nil + } +} + +// WithFakeIntakeOptions adds options to the fake intake +func WithFakeIntakeOptions(opts ...fakeintake.Option) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.fakeintakeOptions = opts + return nil + } +} + +// WithGKEOptions adds options to the cluster +func WithGKEOptions(opts ...gke.Option) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.gkeOptions = opts + return nil + } +} + +// WithExtraConfigParams adds extra config parameters to the environment +func WithExtraConfigParams(configMap runner.ConfigMap) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.extraConfigParams = configMap + return nil + } +} + +// WorkloadAppFunc is a function that deploys a workload app to a kube provider +type WorkloadAppFunc func(e config.Env, kubeProvider *kubernetes.Provider) (*kubeComp.Workload, error) + +// WithWorkloadApp adds a workload app to the environment +func WithWorkloadApp(appFunc WorkloadAppFunc) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.workloadAppFuncs = append(params.workloadAppFuncs, appFunc) + return nil + } +} diff --git a/test/new-e2e/pkg/runner/configmap.go b/test/new-e2e/pkg/runner/configmap.go index 09bb63ee39d16..e69223d164864 100644 --- a/test/new-e2e/pkg/runner/configmap.go +++ b/test/new-e2e/pkg/runner/configmap.go @@ -12,6 +12,7 @@ import ( commonconfig "github.com/DataDog/test-infra-definitions/common/config" infraaws "github.com/DataDog/test-infra-definitions/resources/aws" infraazure 
"github.com/DataDog/test-infra-definitions/resources/azure" + infragcp "github.com/DataDog/test-infra-definitions/resources/gcp" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters" @@ -49,6 +50,13 @@ const ( AzurePrivateKeyPath = commonconfig.DDInfraConfigNamespace + ":" + infraazure.DDInfraDefaultPrivateKeyPath // AzurePrivateKeyPassword pulumi config paramater name AzurePrivateKeyPassword = commonconfig.DDInfraConfigNamespace + ":" + infraazure.DDInfraDefaultPrivateKeyPassword + + // GCPPublicKeyPath pulumi config paramater name + GCPPublicKeyPath = commonconfig.DDInfraConfigNamespace + ":" + infragcp.DDInfraDefaultPublicKeyPath + // GCPPrivateKeyPath pulumi config paramater name + GCPPrivateKeyPath = commonconfig.DDInfraConfigNamespace + ":" + infragcp.DDInfraDefaultPrivateKeyPath + // GCPPrivateKeyPassword pulumi config paramater name + GCPPrivateKeyPassword = commonconfig.DDInfraConfigNamespace + ":" + infragcp.DDInfraDefaultPrivateKeyPassword ) // ConfigMap type alias to auto.ConfigMap @@ -111,8 +119,8 @@ func BuildStackParameters(profile Profile, scenarioConfig ConfigMap) (ConfigMap, cm.Set(InfraEnvironmentVariables, profile.EnvironmentNames(), false) params := map[parameters.StoreKey][]string{ parameters.KeyPairName: {AWSKeyPairName}, - parameters.PublicKeyPath: {AWSPublicKeyPath, AzurePublicKeyPath}, - parameters.PrivateKeyPath: {AWSPrivateKeyPath, AzurePrivateKeyPath}, + parameters.PublicKeyPath: {AWSPublicKeyPath, AzurePublicKeyPath, GCPPublicKeyPath}, + parameters.PrivateKeyPath: {AWSPrivateKeyPath, AzurePrivateKeyPath, GCPPrivateKeyPath}, parameters.ExtraResourcesTags: {InfraExtraResourcesTags}, parameters.PipelineID: {AgentPipelineID}, parameters.CommitSHA: {AgentCommitSHA}, @@ -132,7 +140,7 @@ func BuildStackParameters(profile Profile, scenarioConfig ConfigMap) (ConfigMap, secretParams := map[parameters.StoreKey][]string{ parameters.APIKey: {AgentAPIKey}, parameters.APPKey: {AgentAPPKey}, - parameters.PrivateKeyPassword: {AWSPrivateKeyPassword, AzurePrivateKeyPassword}, + parameters.PrivateKeyPassword: {AWSPrivateKeyPassword, AzurePrivateKeyPassword, GCPPrivateKeyPassword}, } for storeKey, configMapKeys := range secretParams { diff --git a/test/new-e2e/pkg/runner/configmap_test.go b/test/new-e2e/pkg/runner/configmap_test.go index 19c6dfaf32f30..6835eb9ea1fb9 100644 --- a/test/new-e2e/pkg/runner/configmap_test.go +++ b/test/new-e2e/pkg/runner/configmap_test.go @@ -42,6 +42,9 @@ func Test_BuildStackParameters(t *testing.T) { "ddinfra:az/defaultPublicKeyPath": auto.ConfigValue{Value: "public_key_path", Secret: false}, "ddinfra:az/defaultPrivateKeyPath": auto.ConfigValue{Value: "private_key_path", Secret: false}, "ddinfra:az/defaultPrivateKeyPassword": auto.ConfigValue{Value: "private_key_password", Secret: true}, + "ddinfra:gcp/defaultPublicKeyPath": auto.ConfigValue{Value: "public_key_path", Secret: false}, + "ddinfra:gcp/defaultPrivateKeyPath": auto.ConfigValue{Value: "private_key_path", Secret: false}, + "ddinfra:gcp/defaultPrivateKeyPassword": auto.ConfigValue{Value: "private_key_password", Secret: true}, "ddagent:pipeline_id": auto.ConfigValue{Value: "pipeline_id", Secret: false}, "ddagent:commit_sha": auto.ConfigValue{Value: "commit_sha", Secret: false}, }, configMap) diff --git a/test/new-e2e/pkg/runner/local_profile.go b/test/new-e2e/pkg/runner/local_profile.go index 633e0ccf0972c..2cba95a568cd2 100644 --- a/test/new-e2e/pkg/runner/local_profile.go +++ b/test/new-e2e/pkg/runner/local_profile.go @@ -19,6 +19,7 @@ import ( var defaultLocalEnvironments = 
map[string]string{ "aws": "agent-sandbox", "az": "agent-sandbox", + "gcp": "agent-sandbox", } // NewLocalProfile creates a new local profile diff --git a/test/new-e2e/pkg/utils/e2e/client/host.go b/test/new-e2e/pkg/utils/e2e/client/host.go index 31a14f32aaf64..16d597805dd1a 100644 --- a/test/new-e2e/pkg/utils/e2e/client/host.go +++ b/test/new-e2e/pkg/utils/e2e/client/host.go @@ -40,7 +40,7 @@ const ( sshMaxRetries = 20 ) -type buildCommandFn func(host *Host, command string, envVars EnvVar) string +type buildCommandFn func(command string, envVars EnvVar) string type convertPathSeparatorFn func(string) string @@ -122,13 +122,13 @@ func (h *Host) Execute(command string, options ...ExecuteOption) (string, error) if err != nil { return "", err } - command = h.buildCommand(h, command, params.EnvVariables) + command = h.buildCommand(command, params.EnvVariables) return h.executeAndReconnectOnError(command) } func (h *Host) executeAndReconnectOnError(command string) (string, error) { scrubbedCommand := h.scrubber.ScrubLine(command) // scrub the command in case it contains secrets - h.context.T().Logf("Executing command `%s`", scrubbedCommand) + h.context.T().Logf("%s - %s - Executing command `%s`", time.Now().Format("02-01-2006 15:04:05"), h.context.T().Name(), scrubbedCommand) stdout, err := execute(h.client, command) if err != nil && strings.Contains(err.Error(), "failed to create session:") { err = h.Reconnect() @@ -487,12 +487,13 @@ func buildCommandFactory(osFamily oscomp.Family) buildCommandFn { return buildCommandOnLinuxAndMacOS } -func buildCommandOnWindows(h *Host, command string, envVar EnvVar) string { +func buildCommandOnWindows(command string, envVar EnvVar) string { cmd := "" - // Set $ErrorActionPreference to 'Stop' to cause PowerShell to stop on an erorr instead + // Set $ErrorActionPreference to 'Stop' to cause PowerShell to stop on an error instead // of the default 'Continue' behavior. - // This also ensures that Execute() will return an error when the command fails. + // This also ensures that Execute() will return an error when a command fails. + // Note that this only applies to PowerShell commands, not to external commands or native binaries. // // For example, if the command is (Get-Service -Name ddnpm).Status and the service does not exist, // then by default the command will print an error but the exit code will be 0 and Execute() will not return an error. @@ -506,26 +507,31 @@ func buildCommandOnWindows(h *Host, command string, envVar EnvVar) string { // https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_preference_variables#erroractionpreference cmd += "$ErrorActionPreference='Stop'; " - envVarSave := map[string]string{} for envName, envValue := range envVar { - previousEnvVar, err := h.executeAndReconnectOnError(fmt.Sprintf("$env:%s", envName)) - if err != nil || previousEnvVar == "" { - previousEnvVar = "null" - } - envVarSave[envName] = previousEnvVar - cmd += fmt.Sprintf("$env:%s='%s'; ", envName, envValue) } - cmd += fmt.Sprintf("%s; ", command) - - for envName := range envVar { - cmd += fmt.Sprintf("$env:%s='%s'; ", envName, envVarSave[envName]) - } + // By default, powershell will just exit with 0 or 1, so we call exit to preserve + // the exit code of the command provided by the caller. + // The caller's command may not modify LASTEXITCODE, so manually reset it first, + // then only call exit if the command provided by the caller fails. 
+ // + // https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_automatic_variables?#lastexitcode + // https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_powershell_exe?#-command + cmd += fmt.Sprintf("$LASTEXITCODE=0; %s; if (-not $?) { exit $LASTEXITCODE }", command) + // NOTE: Do not add more commands after the command provided by the caller. + // + // `$ErrorActionPreference`='Stop' only applies to PowerShell commands, not to + // external commands or native binaries, thus later commands will still be executed. + // Additional commands will overwrite the exit code of the command provided by + // the caller which may cause errors to be missed/ignored. + // If it becomes necessary to run more commands after the command provided by the + // caller, we will need to find a way to ensure that the exit code of the command + // provided by the caller is preserved. return cmd } -func buildCommandOnLinuxAndMacOS(_ *Host, command string, envVar EnvVar) string { +func buildCommandOnLinuxAndMacOS(command string, envVar EnvVar) string { cmd := "" for envName, envValue := range envVar { cmd += fmt.Sprintf("%s='%s' ", envName, envValue) diff --git a/test/new-e2e/pkg/utils/infra/retriable_errors.go b/test/new-e2e/pkg/utils/infra/retriable_errors.go index b8d5c27b53195..7d28f17006460 100644 --- a/test/new-e2e/pkg/utils/infra/retriable_errors.go +++ b/test/new-e2e/pkg/utils/infra/retriable_errors.go @@ -24,22 +24,30 @@ func getKnownErrors() []knownError { // Add here errors that are known to be flakes and that should be retried return []knownError{ { - errorMessage: "i/o timeout", + errorMessage: `i\/o timeout`, retryType: ReCreate, }, { // https://datadoghq.atlassian.net/browse/ADXT-1 - errorMessage: "failed attempts: dial tcp :22: connect: connection refused", + errorMessage: `failed attempts: dial tcp :22: connect: connection refused`, retryType: ReCreate, }, { // https://datadoghq.atlassian.net/browse/ADXT-295 - errorMessage: "Resource provider reported that the resource did not exist while updating", + errorMessage: `Resource provider reported that the resource did not exist while updating`, retryType: ReCreate, }, { // https://datadoghq.atlassian.net/browse/ADXT-558 - errorMessage: "Process exited with status 2: running \" sudo cloud-init status --wait\"", + errorMessage: `Process exited with status 2: running " sudo cloud-init status --wait"`, + retryType: ReCreate, + }, + { + errorMessage: `waiting for ECS Service .+fakeintake-ecs.+ create: timeout while waiting for state to become 'tfSTABLE'`, + retryType: ReCreate, + }, + { + errorMessage: `error while waiting for fakeintake`, retryType: ReCreate, }, } diff --git a/test/new-e2e/pkg/utils/infra/stack_manager.go b/test/new-e2e/pkg/utils/infra/stack_manager.go index 2a580fd651d7d..15d9e44c2139b 100644 --- a/test/new-e2e/pkg/utils/infra/stack_manager.go +++ b/test/new-e2e/pkg/utils/infra/stack_manager.go @@ -12,6 +12,7 @@ import ( "fmt" "io" "os" + "regexp" "runtime" "strings" "sync" @@ -56,16 +57,16 @@ var ( initStackManager sync.Once ) -// RetryStrategy is a function that given the current error and the number of retries, returns the type of retry to perform and a list of options to modify the configuration -type RetryStrategy func(error, int) (RetryType, []GetStackOption) +// RetryStrategyFromFn is a function that given the current error and the number of retries, returns the type of retry to perform and a list of options to modify the configuration +type 
RetryStrategyFromFn func(error, int) (RetryType, []GetStackOption) // StackManager handles type StackManager struct { stacks *safeStackMap knownErrors []knownError - // RetryStrategy defines how to handle retries. By default points to StackManager.getRetryStrategyFrom but can be overridden - RetryStrategy RetryStrategy + // GetRetryStrategyFrom defines how to handle retries. By default points to StackManager.getRetryStrategyFrom but can be overridden + GetRetryStrategyFrom RetryStrategyFromFn } type safeStackMap struct { @@ -120,7 +121,7 @@ func newStackManager() (*StackManager, error) { stacks: newSafeStackMap(), knownErrors: getKnownErrors(), } - sm.RetryStrategy = sm.getRetryStrategyFrom + sm.GetRetryStrategyFrom = sm.getRetryStrategyFrom return sm, nil } @@ -523,7 +524,7 @@ func (sm *StackManager) getStack(ctx context.Context, name string, deployFunc pu } } - retryStrategy, changedOpts := sm.RetryStrategy(upError, upCount) + retryStrategy, changedOpts := sm.GetRetryStrategyFrom(upError, upCount) sendEventToDatadog(params.DatadogEventSender, fmt.Sprintf("[E2E] Stack %s : error on Pulumi stack up", name), upError.Error(), []string{"operation:up", "result:fail", fmt.Sprintf("retry:%s", retryStrategy), fmt.Sprintf("stack:%s", stack.Name()), fmt.Sprintf("retries:%d", upCount)}) switch retryStrategy { @@ -619,7 +620,11 @@ func (sm *StackManager) getRetryStrategyFrom(err error, upCount int) (RetryType, } for _, knownError := range sm.knownErrors { - if strings.Contains(err.Error(), knownError.errorMessage) { + isMatch, err := regexp.MatchString(knownError.errorMessage, err.Error()) + if err != nil { + fmt.Printf("Error matching regex %s: %v\n", knownError.errorMessage, err) + } + if isMatch { return knownError.retryType, nil } } diff --git a/test/new-e2e/pkg/utils/infra/stack_manager_test.go b/test/new-e2e/pkg/utils/infra/stack_manager_test.go index c10aaba87ee4e..56de15ce98455 100644 --- a/test/new-e2e/pkg/utils/infra/stack_manager_test.go +++ b/test/new-e2e/pkg/utils/infra/stack_manager_test.go @@ -8,13 +8,15 @@ package infra import ( "context" + "errors" "fmt" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/common" "io" "strings" "testing" "time" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/common" + "github.com/DataDog/datadog-api-client-go/v2/api/datadogV1" "github.com/pulumi/pulumi/sdk/v3/go/auto" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" @@ -225,6 +227,50 @@ func TestStackManager(t *testing.T) { assert.Contains(t, mockDatadogEventSender.events[1].Title, fmt.Sprintf("[E2E] Stack %s : error on Pulumi stack up", stackName)) assert.Contains(t, mockDatadogEventSender.events[2].Title, fmt.Sprintf("[E2E] Stack %s : success on Pulumi stack up", stackName)) }) + + t.Run("should-return-retry-strategy-on-retriable-errors", func(t *testing.T) { + t.Parallel() + + type testError struct { + name string + errMessage string + expectedRetryType RetryType + } + + testErrors := []testError{ + { + name: "timeout", + errMessage: "i/o timeout", + expectedRetryType: ReCreate, + }, + { + name: "connection-refused", + errMessage: "failed attempts: dial tcp :22: connect: connection refused", + expectedRetryType: ReCreate, + }, + { + name: "resource-not-exist", + errMessage: "Resource provider reported that the resource did not exist while updating", + expectedRetryType: ReCreate, + }, + { + name: "cloud-init-timeout", + errMessage: "Process exited with status 2: running \" sudo cloud-init status --wait\"", + expectedRetryType: ReCreate, + }, + { + name: "ecs-fakeintake-timeout", + 
errMessage: "waiting for ECS Service (arn:aws:ecs:us-east-1:669783387624:service/fakeintake-ecs/ci-633219896-4670-e2e-dockersuite-80f62edf7bcc6194-aws-fakeintake-dockervm-srv) create: timeout while waiting for state to become 'tfSTABLE' (last state: 'tfPENDING', timeout: 20m0s)", + expectedRetryType: ReCreate, + }, + } + + for _, te := range testErrors { + err := errors.New(te.errMessage) + retryType, _ := stackManager.getRetryStrategyFrom(err, 0) + assert.Equal(t, te.expectedRetryType, retryType, te.name) + } + }) } func filterRetryOnErrorLogs(logs []string) []string { diff --git a/test/new-e2e/system-probe/connector/metric/metric.go b/test/new-e2e/system-probe/connector/metric/metric.go index 4c674154402cd..dc90a56bfe270 100644 --- a/test/new-e2e/system-probe/connector/metric/metric.go +++ b/test/new-e2e/system-probe/connector/metric/metric.go @@ -15,6 +15,7 @@ import ( "os" "github.com/DataDog/datadog-api-client-go/v2/api/datadog" + "github.com/DataDog/datadog-api-client-go/v2/api/datadogV1" "github.com/DataDog/datadog-api-client-go/v2/api/datadogV2" ) @@ -41,3 +42,27 @@ func SubmitExecutionMetric(metricBody datadogV2.MetricPayload) error { return nil } + +// SubmitExecutionEvent accepts events and sends it to Datadog. +func SubmitExecutionEvent(eventBody datadogV1.EventCreateRequest) error { + if _, ok := os.LookupEnv("DD_API_KEY"); !ok { + fmt.Fprintf(os.Stderr, "skipping sending metric because DD_API_KEY not present") + return nil + } + + ctx := datadog.NewDefaultContext(context.Background()) + configuration := datadog.NewConfiguration() + apiClient := datadog.NewAPIClient(configuration) + api := datadogV1.NewEventsApi(apiClient) + resp, r, err := api.CreateEvent(ctx, eventBody) + + if err != nil { + fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r) + return fmt.Errorf("error when calling `MetricsApi.SubmitMetrics`: %v", err) + } + + responseContent, _ := json.MarshalIndent(resp, "", " ") + fmt.Fprintf(os.Stdout, "Response from `MetricsApi.SubmitMetrics`:\n%s\n", responseContent) + + return nil +} diff --git a/test/new-e2e/system-probe/errors.go b/test/new-e2e/system-probe/errors.go index 2fb1b57595e9a..297f80bb0184a 100644 --- a/test/new-e2e/system-probe/errors.go +++ b/test/new-e2e/system-probe/errors.go @@ -13,13 +13,16 @@ import ( "log" "os" "path" + "regexp" "strings" "time" "github.com/DataDog/datadog-api-client-go/api/v1/datadog" "github.com/DataDog/datadog-api-client-go/v2/api/datadogV2" - "github.com/sethvargo/go-retry" + "github.com/pulumi/pulumi/sdk/v3/go/auto" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra" "github.com/DataDog/datadog-agent/test/new-e2e/system-probe/connector/metric" ) @@ -27,6 +30,7 @@ const ( // bitmap of actions to take for an error retryStack = 0x1 // 0b01 emitMetric = 0x2 // 0b10 + changeAZ = 0x4 // 0b100 aria2cMissingStatusErrorStr = "error: wait: remote command exited without exit status or exit signal: running \" aria2c" @@ -43,6 +47,8 @@ const ( ec2StateChangeTimeoutError ioTimeout tcp22ConnectionRefused + ec2InstanceCreateTimeout + ddAgentRepoFailure ) type handledError struct { @@ -66,7 +72,7 @@ var handledErrorsLs = []handledError{ errorType: insufficientCapacityError, errorString: "InsufficientInstanceCapacity", metric: "insufficient-capacity", - action: retryStack | emitMetric, + action: retryStack | emitMetric | changeAZ, }, // Retry when ssh thinks aria2c exited without status. 
This may happen // due to network connectivity issues if ssh keepalive mecahnism fails. @@ -80,7 +86,7 @@ var handledErrorsLs = []handledError{ errorType: ec2StateChangeTimeoutError, errorString: "timeout while waiting for state to become 'running'", metric: "ec2-timeout-state-change", - action: retryStack | emitMetric, + action: retryStack | emitMetric | changeAZ, }, { errorType: ioTimeout, @@ -100,6 +106,27 @@ var handledErrorsLs = []handledError{ metric: "ssh-connection-refused", action: retryStack | emitMetric, }, + { + errorType: ec2InstanceCreateTimeout, + errorString: "creating EC2 Instance: operation error", + metric: "ec2-instance-create-timeout", + action: retryStack | emitMetric, + }, + { + errorType: ddAgentRepoFailure, + errorString: "Failed to update the sources after adding the Datadog repository.", + metric: "apt-dd-agent-repo-failure", + action: emitMetric, + }, +} + +type retryHandler struct { + currentAZ int + maxRetries int + retryDelay time.Duration + allErrors []error + configMap runner.ConfigMap + infraEnv string } func errorMetric(errType string) datadogV2.MetricPayload { @@ -123,15 +150,29 @@ func errorMetric(errType string) datadogV2.MetricPayload { } } -func handleScenarioFailure(err error, changeRetryState func(handledError)) error { +func (r *retryHandler) HandleError(err error, retryCount int) (infra.RetryType, []infra.GetStackOption) { + r.allErrors = append(r.allErrors, err) + + if retryCount > r.maxRetries { + log.Printf("environment setup error: %v. Maximum number of retries (%d) exceeded, failing setup.\n", err, r.maxRetries) + return infra.NoRetry, nil + } + + var newOpts []infra.GetStackOption + retry := infra.NoRetry errStr := err.Error() for _, e := range handledErrorsLs { if !strings.Contains(errStr, e.errorString) { continue } - // modify any state within the retry block - changeRetryState(e) + if (e.action & changeAZ) != 0 { + r.currentAZ++ + if az := getAvailabilityZone(r.infraEnv, r.currentAZ); az != "" { + r.configMap["ddinfra:aws/defaultSubnets"] = auto.ConfigValue{Value: az} + newOpts = append(newOpts, infra.WithConfigMap(r.configMap)) + } + } if (e.action & emitMetric) != 0 { submitError := metric.SubmitExecutionMetric(errorMetric(e.metric)) @@ -145,15 +186,19 @@ func handleScenarioFailure(err error, changeRetryState func(handledError)) error } if (e.action & retryStack) != 0 { - log.Printf("environment setup error: %v. Retrying stack.\n", err) - return retry.RetryableError(err) + retry = infra.ReUp } break } - log.Printf("environment setup error: %v. Failing stack.\n", err) - return err + log.Printf("environment setup error. 
Retry strategy: %s.\n", retry) + if retry != infra.NoRetry { + log.Printf("waiting %s before retrying...\n", r.retryDelay) + time.Sleep(r.retryDelay) + } + + return retry, newOpts } func storeErrorReasonForCITags(reason string) error { @@ -177,3 +222,69 @@ func storeNumberOfRetriesForCITags(retries int) error { _, err = f.WriteString(fmt.Sprintf("%d", retries)) return err } + +type pulumiError struct { + command string + arch string + vmCommand string + errorMessage string + vmName string +} + +var commandRegex = regexp.MustCompile(`^ command:remote:Command \(([^\)]+)\):$`) + +func parsePulumiDiagnostics(message string) *pulumiError { + var perr pulumiError + lines := strings.Split(message, "\n") + inDiagnostics := false + for _, line := range lines { + if !inDiagnostics { + if line == "Diagnostics:" { + // skip until next line + inDiagnostics = true + } + continue + } + + if len(line) == 0 || line[0] != ' ' { + // Finished reading diagnostics, break out of the loop + return &perr + } + + if perr.command == "" { + commandMatch := commandRegex.FindStringSubmatch(line) + if commandMatch != nil { + perr.command = commandMatch[1] + + perr.arch, perr.vmCommand, perr.vmName = parsePulumiComand(perr.command) + } + } else { + perr.errorMessage += strings.Trim(line, " ") + "\n" + } + } + + return nil +} + +var archRegex = regexp.MustCompile(`distro_(arm64|x86_64)`) +var vmCmdRegex = regexp.MustCompile(`-cmd-.+-(?:ddvm-\d+-\d+|distro_(?:x86_64|arm64))-(.+)$`) +var vmNameRegex = regexp.MustCompile(`-(?:conn|cmd)-(?:arm64|x86_64)-([^-]+)-`) + +func parsePulumiComand(command string) (arch, vmCommand, vmName string) { + archMatch := archRegex.FindStringSubmatch(command) + if archMatch != nil { + arch = archMatch[1] + } + + vmCmdMatch := vmCmdRegex.FindStringSubmatch(command) + if vmCmdMatch != nil { + vmCommand = vmCmdMatch[1] + } + + vmNameMatch := vmNameRegex.FindStringSubmatch(command) + if vmNameMatch != nil { + vmName = vmNameMatch[1] + } + + return +} diff --git a/test/new-e2e/system-probe/errors_test.go b/test/new-e2e/system-probe/errors_test.go new file mode 100644 index 0000000000000..84ae28f4f9baf --- /dev/null +++ b/test/new-e2e/system-probe/errors_test.go @@ -0,0 +1,235 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !windows + +package systemprobe + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +const outputLocalError = ` +Updating (gjulian-guillermo.julian-e2e-report-all-errors-ddvm): + + pulumi:pulumi:Stack e2elocal-gjulian-guillermo.julian-e2e-report-all-errors-ddvm running + pulumi:providers:random random +@ updating.... 
+ dd:Host aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64 + pulumi:providers:aws aws + pulumi:providers:command command + random:index:RandomShuffle aws-rnd-subnet + random:index:RandomString random-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-random-string-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192 + command:local:Command local-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-gen-libvirt-sshkey + aws:ec2:Instance aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64 + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-wait-cloud-init + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-write-ssh-key + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-ubuntu_22.04-distro_arm64-arm64-write-vol-xml + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-add-microvm-ssh-dir + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-allow-ssh-env + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-distro_arm64-download-with-curl + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-docker-arm64.qcow2-distro_arm64-arm64-write-vol-xml + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-add-microvm-ssh-config + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-reload sshd + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-docker-arm64.qcow2-distro_arm64-arm64-extract-base-volume-package + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-ubuntu_22.04-distro_arm64-arm64-extract-base-volume-package + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-read-microvm-ssh-key + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-write-pool-xml + pulumi:providers:libvirt gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-libvirt-provider + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-define-libvirt-pool + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-build-libvirt-pool + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-start-libvirt-pool + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-refresh-libvirt-pool + command:remote:Command 
remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-docker-arm64.qcow2-distro_arm64-arm64-build-libvirt-basevolume + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-ubuntu_22.04-distro_arm64-arm64-build-libvirt-basevolume +@ updating..... + libvirt:index:Volume gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-ubuntu_22.04-distro_arm64-arm64-overlay-ubuntu_22.04-4-8192 + libvirt:index:Volume gjulian-guillermo.julian-e2e-report-all-errors-ddvm-global-pool-docker-arm64.qcow2-distro_arm64-arm64-overlay-ubuntu_22.04-4-8192 + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-create-nvram + libvirt:index:Network gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-network-distro_arm64-arm64 + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-allow-nfs-ports-tcp-100.1.0.0/24 + libvirt:index:Domain arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192 + command:remote:Command remote-aws-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-cmd-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-allow-nfs-ports-udp-100.1.0.0/24 + + command:remote:Command remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb creating (0s) +@ updating..... + + command:remote:Command remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb creating (2s) bash: line 1: caca: command not found + + command:remote:Command remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb creating (2s) error: Process exited with status 127: running " caca /mnt/docker && mount /dev/vdb /mnt/docker": + + command:remote:Command remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb **creating failed** error: Process exited with status 127: running " caca /mnt/docker && mount /dev/vdb /mnt/docker": + pulumi:pulumi:Stack e2elocal-gjulian-guillermo.julian-e2e-report-all-errors-ddvm running error: update failed + pulumi:pulumi:Stack e2elocal-gjulian-guillermo.julian-e2e-report-all-errors-ddvm **failed** 1 error +Diagnostics: + command:remote:Command (remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb): + error: Process exited with status 127: running " nocommand /mnt/docker && mount /dev/vdb /mnt/docker": + bash: line 1: nocommand: command not found + + pulumi:pulumi:Stack (e2elocal-gjulian-guillermo.julian-e2e-report-all-errors-ddvm): + error: update failed + +Outputs: + kmt-stack: (json) { + arm64: { + ip : "172.29.176.14" + microvms: [ + [0]: { + id : "arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192" + ip : "100.1.0.2" + ssh-key-path: "/home/kernel-version-testing/ddvm_rsa" + 
tag : "ubuntu_22.04" + vmset-tags : [ + [0]: "distro_arm64" + ] + } + ] + } + } + + +Resources: + 36 unchanged + +Duration: 6s +` + +const outputSSHFailed = ` + pulumi:pulumi:Stack e2eci-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877 **failed** 1 error +Diagnostics: + pulumi:pulumi:Stack (e2eci-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877): + error: update failed + command:remote:Command (remote-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877-arm64-conn-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-cmd-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-mount-disk-dev-vdb): + error: proxy: after 60 failed attempts: ssh: rejected: connect failed (No route to host) +Outputs: + kmt-stack: [secret] +Resources: + +-8 replaced + 349 unchanged +Duration: 7m35s +` + +const outputSSHFailedWithChangedOrder = ` +@ updating.... + + pulumi:pulumi:Stack e2eci-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877 creating (933s) error: update failed + + pulumi:pulumi:Stack e2eci-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877 **creating failed (933s)** 1 error +Diagnostics: + command:remote:Command (remote-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877-arm64-conn-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-cmd-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-mount-disk-dev-vdb): + error: proxy: after 60 failed attempts: ssh: rejected: connect failed (No route to host) + + pulumi:pulumi:Stack (e2eci-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877): + error: update failed + +Outputs: + kmt-stack: [secret] + +Resources: + + 357 created + +Duration: 15m34s +` + +func TestParseDiagnostics(t *testing.T) { + cases := []struct { + caseName string + output string + result pulumiError + }{ + { + caseName: "LocalError", + output: outputLocalError, + result: pulumiError{ + command: "remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb", + arch: "arm64", + vmCommand: "mount-disk-dev-vdb", + errorMessage: "error: Process exited with status 127: running \" nocommand /mnt/docker && mount /dev/vdb /mnt/docker\":\nbash: line 1: nocommand: command not found\n", + vmName: "ubuntu_22.04", + }, + }, + { + caseName: "SSHFailed", + output: outputSSHFailed, + result: pulumiError{ + command: "remote-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877-arm64-conn-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-cmd-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-mount-disk-dev-vdb", + arch: "arm64", + vmCommand: "mount-disk-dev-vdb", + vmName: "fedora_37", + errorMessage: "error: proxy: after 60 failed attempts: ssh: rejected: connect failed (No route to host)\n", + }, + }, + { + caseName: "SSHFailedWithChangedOrder", + output: outputSSHFailedWithChangedOrder, + result: pulumiError{ + command: "remote-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877-arm64-conn-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-cmd-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-mount-disk-dev-vdb", + arch: "arm64", + vmCommand: "mount-disk-dev-vdb", + vmName: "fedora_37", + errorMessage: "error: proxy: after 60 failed attempts: ssh: rejected: connect failed (No route to host)\n", + }, + }, + } + + for _, c := range cases { + t.Run(c.caseName, func(tt *testing.T) { + result := parsePulumiDiagnostics(c.output) + require.NotNil(tt, result) + 
require.Equal(tt, c.result, *result) + }) + } +} + +func TestParsePulumiCommand(t *testing.T) { + cases := []struct { + caseName string + command string + arch string + vmCmd string + vmName string + }{ + { + caseName: "NoVMSet", + command: "remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb", + arch: "arm64", + vmCmd: "mount-disk-dev-vdb", + vmName: "ubuntu_22.04", + }, + { + caseName: "CommandWithoutVM", + command: "remote-aws-ci-634872953-4670-kernel-matrix-testing-system-probe-x86-64-44043832-x86_64-cmd-only_usm-distro_x86_64-download-with-curl", + arch: "x86_64", + vmCmd: "download-with-curl", + vmName: "", + }, + { + caseName: "DomainCreationCommand", + command: "remote-aws-ci-632806887-4670-kernel-matrix-testing-system-probe-arm64-43913143-arm64-cmd-arm64-debian_12-distro_arm64-no_usm-ddvm-4-12288-create-nvram", + arch: "arm64", + vmCmd: "create-nvram", + vmName: "debian_12", + }, + { + caseName: "AlteredTagOrder", + command: "remote-ci-632806887-4670-kernel-matrix-testing-system-probe-arm64-43913143-arm64-conn-arm64-ubuntu_23.10-only_usm-distro_arm64-ddvm-4-12288-cmd-arm64-ubuntu_23.10-only_usm-distro_arm64-ddvm-4-12288-set-docker-data-root", + arch: "arm64", + vmCmd: "set-docker-data-root", + vmName: "ubuntu_23.10", + }, + { + caseName: "CommandWithVMSet", + command: "remote-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877-arm64-conn-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-cmd-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-mount-disk-dev-vdb", + arch: "arm64", + vmCmd: "mount-disk-dev-vdb", + vmName: "fedora_37", + }, + } + + for _, c := range cases { + t.Run(c.caseName, func(tt *testing.T) { + arch, vmCmd, vmName := parsePulumiComand(c.command) + require.Equal(tt, c.arch, arch) + require.Equal(tt, c.vmCmd, vmCmd) + require.Equal(tt, c.vmName, vmName) + }) + } +} diff --git a/test/new-e2e/system-probe/system-probe-test-env.go b/test/new-e2e/system-probe/system-probe-test-env.go index 5a7a00321707d..2148e4d94427e 100644 --- a/test/new-e2e/system-probe/system-probe-test-env.go +++ b/test/new-e2e/system-probe/system-probe-test-env.go @@ -21,12 +21,13 @@ import ( "syscall" "time" + "github.com/DataDog/datadog-api-client-go/v2/api/datadogV1" "github.com/DataDog/test-infra-definitions/scenarios/aws/microVMs/microvms" - "github.com/sethvargo/go-retry" "golang.org/x/term" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra" + "github.com/DataDog/datadog-agent/test/new-e2e/system-probe/connector/metric" "github.com/pulumi/pulumi/sdk/v3/go/auto" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" @@ -176,6 +177,10 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *EnvOpts) (* return nil, fmt.Errorf("No API Key for datadog-agent provided") } + ciJob := getEnv("CI_JOB_ID", "") + ciPipeline := getEnv("CI_PIPELINE_ID", "") + ciBranch := getEnv("CI_COMMIT_REF_NAME", "") + var customAMILocalWorkingDir string // Remote AMI working dir is always on Linux @@ -239,60 +244,43 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *EnvOpts) (* config["ddinfra:extraResourcesTags"] = auto.ConfigValue{Value: envVars} } - var upResult auto.UpResult - var pulumiStack *auto.Stack - ctx := context.Background() - currentAZ := 0 // PrimaryAZ - b := retry.NewConstant(3 * time.Second) - // Retry 4 times. 
This allows us to cycle through all AZs, and handle libvirt - // connection issues in the worst case. - b = retry.WithMaxRetries(4, b) - numRetries := 0 - retryErr := retry.Do(ctx, b, func(_ context.Context) error { - if az := getAvailabilityZone(opts.InfraEnv, currentAZ); az != "" { - config["ddinfra:aws/defaultSubnets"] = auto.ConfigValue{Value: az} - } - - pulumiStack, upResult, err = stackManager.GetStackNoDeleteOnFailure( - systemProbeTestEnv.context, - systemProbeTestEnv.name, - func(ctx *pulumi.Context) error { - if err := microvms.Run(ctx); err != nil { - return fmt.Errorf("setup micro-vms in remote instance: %w", err) - } - return nil - }, - infra.WithFailOnMissing(opts.FailOnMissing), - infra.WithConfigMap(config), - ) + retryHandler := retryHandler{ + currentAZ: 0, + maxRetries: 4, + retryDelay: 3 * time.Second, + configMap: config, + infraEnv: opts.InfraEnv, + } - if err != nil { - numRetries++ - return handleScenarioFailure(err, func(possibleError handledError) { - // handle the following errors by trying in a different availability zone - if possibleError.errorType == insufficientCapacityError || - possibleError.errorType == ec2StateChangeTimeoutError { - currentAZ++ - } - }) - } + stackManager.GetRetryStrategyFrom = retryHandler.HandleError + pulumiStack, upResult, pulumiErr := stackManager.GetStackNoDeleteOnFailure( + systemProbeTestEnv.context, + systemProbeTestEnv.name, + func(ctx *pulumi.Context) error { + if err := microvms.Run(ctx); err != nil { + return fmt.Errorf("setup micro-vms in remote instance: %w", err) + } + return nil + }, + infra.WithFailOnMissing(opts.FailOnMissing), + infra.WithConfigMap(config), + ) + if pulumiErr != nil { // Mark the test as successful, just in case we succeeded after a retry - err = storeErrorReasonForCITags("") + err := storeErrorReasonForCITags("") if err != nil { log.Printf("failed to store error reason for CI tags: %v", err) } + } - return nil - }) - - err = storeNumberOfRetriesForCITags(numRetries) + err = storeNumberOfRetriesForCITags(len(retryHandler.allErrors)) if err != nil { log.Printf("failed to store number of retries for CI tags: %v", err) } outputs := upResult.Outputs - if retryErr != nil { + if pulumiErr != nil { // pulumi does not populate `UpResult` with the stack output if the // update process failed. In this case we must manually fetch the outputs. 
outputs, err = pulumiStack.Outputs(context.Background()) @@ -305,8 +293,47 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *EnvOpts) (* if err != nil { err = fmt.Errorf("failed to write stack output to file: %w", err) } - if retryErr != nil { - return nil, errors.Join(fmt.Errorf("failed to create stack: %w", retryErr), err) + if pulumiErr != nil { + for i, retryErr := range retryHandler.allErrors { + pulumiError := parsePulumiDiagnostics(retryErr.Error()) + if pulumiError != nil { + log.Printf("pulumi error on retry %d:\n\tcommand: %s\n\tvm-command: %s\n\terror message:\n%s\n\n", i, pulumiError.command, pulumiError.vmCommand, pulumiError.errorMessage) + + // Send the error as a DD event so we can track it + event := datadogV1.EventCreateRequest{ + Title: "[KMT] Environment setup error", + Text: pulumiError.errorMessage, + Tags: []string{ + "test:kmt", + "source:pulumi", + "repository:datadog/datadog-agent", + "team:ebpf-platform", + fmt.Sprintf("vm.name:%s", pulumiError.vmName), + fmt.Sprintf("vm.arch:%s", pulumiError.arch), + fmt.Sprintf("vm.command:%s", pulumiError.vmCommand), + }, + } + + if ciJob != "" { + event.Tags = append(event.Tags, fmt.Sprintf("ci.job.id:%s", ciJob)) + } + + if ciPipeline != "" { + event.Tags = append(event.Tags, fmt.Sprintf("ci.pipeline.id:%s", ciPipeline)) + } + + if ciBranch != "" { + event.Tags = append(event.Tags, fmt.Sprintf("ci.branch:%s", ciBranch)) + } + + if err = metric.SubmitExecutionEvent(event); err != nil { + log.Printf("failed to submit environment setup error event: %v", err) + } + } else { + log.Printf("unknown/couldn't parse error on retry %d", i) + } + } + return nil, errors.Join(fmt.Errorf("failed to create stack: %w", pulumiErr), err) } systemProbeTestEnv.StackOutput = upResult diff --git a/test/new-e2e/system-probe/test-runner/main.go b/test/new-e2e/system-probe/test-runner/main.go index c5bc806155db1..a7b21754bd50e 100644 --- a/test/new-e2e/system-probe/test-runner/main.go +++ b/test/new-e2e/system-probe/test-runner/main.go @@ -65,7 +65,7 @@ var timeouts = map[*regexp.Regexp]time.Duration{ regexp.MustCompile("pkg/network/tracer$"): 55 * time.Minute, regexp.MustCompile("pkg/network/usm$"): 55 * time.Minute, regexp.MustCompile("pkg/network/usm/tests$"): 20 * time.Minute, - regexp.MustCompile("pkg/security.*"): 30 * time.Minute, + regexp.MustCompile("pkg/security.*"): 45 * time.Minute, } func getTimeout(pkg string) time.Duration { diff --git a/test/new-e2e/system-probe/vm-metrics/vm-metrics.go b/test/new-e2e/system-probe/vm-metrics/vm-metrics.go new file mode 100644 index 0000000000000..9d2d0edae409a --- /dev/null +++ b/test/new-e2e/system-probe/vm-metrics/vm-metrics.go @@ -0,0 +1,283 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !windows + +// Package systemprobe is sets up the remote testing environment for system-probe using the Kernel Matrix Testing framework +package main + +import ( + "flag" + "fmt" + "log" + "os" + "regexp" + "syscall" + "time" + + "github.com/DataDog/datadog-go/v5/statsd" + "github.com/digitalocean/go-libvirt" + "github.com/digitalocean/go-libvirt/socket/dialers" +) + +const kmtMicroVmsPrefix = "kmt.microvm." 
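+
+// Note on the metrics emitted below: each libvirt domain memory stat is reported as a
+// statsd gauge under this prefix; for example the RSS stat becomes the gauge
+// "kmt.microvm.memory_rss_bytes", tagged with the distro parsed from the libvirt domain
+// name (e.g. "os:ubuntu_22.04"). Values reported by libvirt in kB are converted to bytes
+// before submission, with the major page-fault count kept as a raw counter value.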
+ +var distrosMatch = map[string]*regexp.Regexp{ + "ubuntu": regexp.MustCompile(`-(ubuntu_[\.,\d]{1,5}).*-`), + "fedora": regexp.MustCompile(`-(fedora_[\.,\d]{1,5}).*-`), + "debian": regexp.MustCompile(`-(debian_[\.,\d]{1,5}).*-`), + "centos": regexp.MustCompile(`-(centos_[\.,\d]{1,5}).*-`), + "amazon": regexp.MustCompile(`-(amazon_[\.,\d]{1,5}).*-`), + "rocky": regexp.MustCompile(`-(rocky_[\.,\d]{1,5}).*-`), + "oracle": regexp.MustCompile(`-(oracle_[\.,\d]{1,5}).*-`), + "opensuse": regexp.MustCompile(`-(opensuse_[\.,\d]{1,5}).*-`), + "suse": regexp.MustCompile(`-(suse_[\.,\d]{1,5}).*-`), +} + +var memStatTagToName = map[libvirt.DomainMemoryStatTags]string{ + libvirt.DomainMemoryStatSwapIn: "swap_in_bytes", // The total amount of data read from swap space (in kB). + libvirt.DomainMemoryStatSwapOut: "swap_out_bytes", // The total amount of memory written out to swap space (in kB). + libvirt.DomainMemoryStatMajorFault: "major_pagefault", // Page faults occur when a process makes a valid access to virtual memory that is not available. When servicing the page fault, if disk IO is required, it is considered a major fault. + libvirt.DomainMemoryStatAvailable: "memory_available_bytes", // The total amount of usable memory as seen by the domain. This value may be less than the amount of memory assigned to the domain if a balloon driver is in use or if the guest OS does not initialize all assigned pages. This value is expressed in kB. + libvirt.DomainMemoryStatActualBalloon: "memory_actual_balloon_bytes", // Current balloon value (in KB). + libvirt.DomainMemoryStatRss: "memory_rss_bytes", // Resident Set Size of the process running the domain. This value is in kB + libvirt.DomainMemoryStatUsable: "memory_usable_bytes", // How much the balloon can be inflated without pushing the guest system to swap, corresponds to 'Available' in /proc/meminfo + libvirt.DomainMemoryStatUnused: "memory_unused_bytes", // The amount of memory left completely unused by the system. Memory that is available but used for reclaimable caches should NOT be reported as free. This value is expressed in kB. 
+} + +type libvirtInterface interface { + ConnectListAllDomains(int32, libvirt.ConnectListAllDomainsFlags) ([]libvirt.Domain, uint32, error) + DomainMemoryStats(libvirt.Domain, uint32, uint32) ([]libvirt.DomainMemoryStat, error) +} + +type libvirtExporter struct { + libvirt libvirtInterface + statsdClient statsd.ClientInterface +} + +func newLibvirtExporter(l libvirtInterface, client statsd.ClientInterface) *libvirtExporter { + return &libvirtExporter{ + libvirt: l, + statsdClient: client, + } +} + +func (l *libvirtExporter) collect() ([]*domainMetrics, error) { + return collectLibvirtMetrics(l.libvirt) +} + +func (l *libvirtExporter) submit(metrics []*domainMetrics) error { + for _, dm := range metrics { + for _, m := range dm.metrics { + if err := l.statsdClient.Gauge(kmtMicroVmsPrefix+m.name, float64(m.value), m.tags, 1); err != nil { + return fmt.Errorf("error sending metric: %w", err) + } + } + } + if err := l.statsdClient.Flush(); err != nil { + return fmt.Errorf("failed to flush client: %w", err) + } + + return nil +} + +type statsdMetric struct { + name string + value uint64 + tags []string +} + +type domainMetrics struct { + osID string + metrics []statsdMetric + + libvirtDomain libvirt.Domain +} + +func (d *domainMetrics) addMetric(name string, value uint64, tags []string) { + d.metrics = append(d.metrics, statsdMetric{ + name: name, + value: value, + tags: tags, + }) +} + +func kbToBytes(kb uint64) uint64 { + return kb * 1024 +} + +func (d *domainMetrics) collectDomainMemoryStatInfo(l libvirtInterface) error { + memStats, err := l.DomainMemoryStats(d.libvirtDomain, uint32(libvirt.DomainMemoryStatNr), 0) + if err != nil { + return fmt.Errorf("failed to get memory stats: %w", err) + } + + tags := []string{fmt.Sprintf("os:%s", d.osID)} + for _, stat := range memStats { + if statString, ok := memStatTagToName[libvirt.DomainMemoryStatTags(stat.Tag)]; ok { + if stat.Tag == int32(libvirt.DomainMemoryStatMajorFault) { + d.addMetric(statString, stat.Val, tags) + } else { + d.addMetric(statString, kbToBytes(stat.Val), tags) + } + } + } + + return nil +} + +func collectLibvirtMetrics(l libvirtInterface) ([]*domainMetrics, error) { + var dMetrics []*domainMetrics + + domains, _, err := l.ConnectListAllDomains(1, libvirt.ConnectListDomainsActive) + if err != nil { + return nil, fmt.Errorf("failed to list domains: %w", err) + } + + for _, d := range domains { + osID := parseOSInformation(d.Name) + if osID == "" { + continue + } + + dMetrics = append(dMetrics, &domainMetrics{ + osID: osID, + libvirtDomain: d, + }) + } + + for _, d := range dMetrics { + if err := d.collectDomainMemoryStatInfo(l); err != nil { + return nil, fmt.Errorf("failed to collect memory stats for domain %s: %w", d.osID, err) + } + } + + return dMetrics, nil +} + +func parseOSInformation(name string) string { + for _, distro := range distrosMatch { + if match := distro.FindStringSubmatch(name); match != nil { + return match[1] + } + } + + return "" +} + +type tagsList []string + +func (t *tagsList) String() string { + return fmt.Sprintf("%v", *t) +} + +func (t *tagsList) Set(value string) error { + *t = append(*t, value) + return nil +} + +// runAsDaemon function runs the vm-metrics collector as a daemon +// To daemonize a process this function: +// - forksExec the vm-metrics binary, allowing the parent to exit. +// this makes the new process the child of the init process. +// - setsid() on child process. Make the child the session leader +// and release it from the original controlling terminal. 
+// - Reset umask, so that files are created with the requested +// permissions +func runAsDaemon(daemonLogFile string) error { + if daemonLogFile == "" { + daemonLogFile = "/tmp/vm-metrics.log" + } + + if _, isDaemon := os.LookupEnv("DAEMON_COLLECTOR"); !isDaemon { + f, err := os.OpenFile(daemonLogFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return fmt.Errorf("failed to open daemon log file: %w", err) + } + + if _, err := syscall.ForkExec(os.Args[0], os.Args, &syscall.ProcAttr{ + Dir: "/", + Env: append(os.Environ(), "DAEMON_COLLECTOR=1"), + Sys: &syscall.SysProcAttr{ + Setsid: true, + }, + Files: []uintptr{0, f.Fd(), f.Fd()}, // print message to the same pty + }); err != nil { + return fmt.Errorf("failed to fork/exec parent process: %w", err) + } + + os.Exit(0) + } + + // close stdin + stdin := os.NewFile(0, "stdin") + stdin.Close() + + // open /dev/null as stdin + if _, err := os.Open("/dev/null"); err != nil { + return fmt.Errorf("failed to open '/dev/null' as stdin: %w", err) + } + + // clear umask + syscall.Umask(0) + + return nil +} + +func main() { + var globalTags tagsList + + statsdPort := flag.String("statsd-port", "8125", "Statsd port") + statsdHost := flag.String("statsd-host", "127.0.0.1", "Statsd host") + collectionInterval := flag.Duration("interval", time.Second*20, "interval for collecting vm stats") + libvirtDaemonURI := flag.String("libvirt-uri", "", "libvirt daemon URI") + daemonize := flag.Bool("daemon", false, "run collector as a daemon") + daemonLogFile := flag.String("log-file", "", "log file daemon") + flag.Var(&globalTags, "tag", "global tags to set") + flag.Parse() + + if *daemonize { + if err := runAsDaemon(*daemonLogFile); err != nil { + log.Printf("failed to run collector as daemon: %v", err) + return + } + } + + log.Printf("VM metrics collector started") + + dialer := dialers.NewLocal(dialers.WithSocket(*libvirtDaemonURI), dialers.WithLocalTimeout((5 * time.Second))) + l := libvirt.NewWithDialer(dialer) + if err := l.ConnectToURI(libvirt.QEMUSystem); err != nil { + log.Fatalf("failed to connect to libvirt: %v", err) + } + defer func() { + if err := l.Disconnect(); err != nil { + log.Printf("failed to disconnect: %v", err) + } + }() + + log.Printf("launching statsd with global tags: %v", globalTags) + dogstatsdClient, err := statsd.New(fmt.Sprintf("%s:%s", *statsdHost, *statsdPort), statsd.WithTags(globalTags)) + if err != nil { + log.Fatal(err) + } + + lexporter := newLibvirtExporter(l, dogstatsdClient) + + for range time.Tick(*collectionInterval) { + metrics, err := lexporter.collect() + if err != nil { + log.Fatal(err) + } + + log.Println("Submitting metrics to statsd:") + for _, m := range metrics { + log.Printf(" %v", *m) + } + if err := lexporter.submit(metrics); err != nil { + log.Fatal(err) + } + } +} diff --git a/test/new-e2e/system-probe/vm-metrics/vm-metrics_test.go b/test/new-e2e/system-probe/vm-metrics/vm-metrics_test.go new file mode 100644 index 0000000000000..657859a3d68fa --- /dev/null +++ b/test/new-e2e/system-probe/vm-metrics/vm-metrics_test.go @@ -0,0 +1,139 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
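+
+// These tests exercise the libvirt exporter against a mocked libvirt connection and a
+// test statsd client: they check that domain names are mapped to distro tags, and that
+// memory stats are converted from kB to bytes (page faults excepted) before being
+// submitted as gauges.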
+ +//go:build !windows + +package main + +import ( + "strings" + "testing" + + "github.com/DataDog/datadog-agent/pkg/trace/teststatsd" + "github.com/digitalocean/go-libvirt" + "github.com/stretchr/testify/require" +) + +var memStats = map[libvirt.DomainMemoryStatTags]uint64{ + libvirt.DomainMemoryStatSwapIn: 10, + libvirt.DomainMemoryStatSwapOut: 20, + libvirt.DomainMemoryStatMajorFault: 30, + libvirt.DomainMemoryStatAvailable: 40, + libvirt.DomainMemoryStatActualBalloon: 50, + libvirt.DomainMemoryStatRss: 60, + libvirt.DomainMemoryStatUnused: 70, + libvirt.DomainMemoryStatUsable: 80, +} + +var nameToTag = map[string]libvirt.DomainMemoryStatTags{ + "swap_in_bytes": libvirt.DomainMemoryStatSwapIn, + "swap_out_bytes": libvirt.DomainMemoryStatSwapOut, + "major_pagefault": libvirt.DomainMemoryStatMajorFault, + "memory_available_bytes": libvirt.DomainMemoryStatAvailable, + "memory_actual_balloon_bytes": libvirt.DomainMemoryStatActualBalloon, + "memory_rss_bytes": libvirt.DomainMemoryStatRss, + "memory_usable_bytes": libvirt.DomainMemoryStatUsable, + "memory_unused_bytes": libvirt.DomainMemoryStatUnused, +} + +func TestParseOSInformation(t *testing.T) { + cases := map[string]string{ + "x86_64-fedora_37-distro_x86_64-no_usm-ddvm-4-12288": "fedora_37", + "x86_64-fedora_38-distro_x86_64-no_usm-ddvm-4-12288": "fedora_38", + "x86_64-amazon_4.14-distro_x86_64-no_usm-ddvm-4-12288": "amazon_4.14", + "x86_64-amazon_5.10-distro_x86_64-no_usm-ddvm-4-12288": "amazon_5.10", + "x86_64-amazon_5.4-distro_x86_64-no_usm-ddvm-4-12288": "amazon_5.4", + "x86_64-amazon_2023-distro_x86_64-no_usm-ddvm-4-12288": "amazon_2023", + "x86_64-centos_7.9-distro_x86_64-no_usm-ddvm-4-12288": "centos_7.9", + "x86_64-centos_8-distro_x86_64-no_usm-ddvm-4-12288": "centos_8", + "x86_64-ubuntu_24.04-all_tests-distro_x86_64-ddvm-4-12288": "ubuntu_24.04", + "arm64-ubuntu_23.10-distro_arm64-no_usm-ddvm-4-12288": "ubuntu_23.10", + "arm64-ubuntu_22.04-distro_arm64-no_usm-ddvm-4-12288": "ubuntu_22.04", + "arm64-ubuntu_20.04-distro_arm64-no_usm-ddvm-4-12288": "ubuntu_20.04", + "arm64-ubuntu_18.04-distro_arm64-no_usm-ddvm-4-12288": "ubuntu_18.04", + "x86_64-ubuntu_16.04-distro_x86_64-no_usm-ddvm-4-12288": "ubuntu_16.04", + "x86_64-debian_9-distro_x86_64-no_usm-ddvm-4-12288": "debian_9", + "x86_64-debian_10-only_usm-distro_x86_64-ddvm-4-12288": "debian_10", + "x86_64-debian_11-only_usm-distro_x86_64-ddvm-4-12288": "debian_11", + "x86_64-debian_12-only_usm-distro_x86_64-ddvm-4-12288": "debian_12", + "x86_64-suse_12.5-all_tests-distro_x86_64-ddvm-4-12288": "suse_12.5", + "x86_64-opensuse_15.5-all_tests-distro_x86_64-ddvm-4-12288": "opensuse_15.5", + "x86_64-opensuse_15.3-all_tests-distro_x86_64-ddvm-4-12288": "opensuse_15.3", + "x86_64-rocky_9.3-all_tests-distro_x86_64-ddvm-4-12288": "rocky_9.3", + "x86_64-rocky_8.5-all_tests-distro_x86_64-ddvm-4-12288": "rocky_8.5", + "x86_64-oracle_9.3-all_tests-distro_x86_64-ddvm-4-12288": "oracle_9.3", + "x86_64-oracle_8.9-all_tests-distro_x86_64-ddvm-4-12288": "oracle_8.9", + } + + for id, os := range cases { + osID := parseOSInformation(id) + require.Equal(t, osID, os) + } +} + +type libvirtMock struct{} + +func (l *libvirtMock) ConnectListAllDomains(_ int32, _ libvirt.ConnectListAllDomainsFlags) ([]libvirt.Domain, uint32, error) { + return []libvirt.Domain{ + {Name: "x86_64-debian_12-only_usm-distro_x86_64-ddvm-4-12288"}, + {Name: "x86_64-ubuntu_16.04-distro_x86_64-no_usm-ddvm-4-12288"}, + }, 0, nil +} + +func (l *libvirtMock) DomainMemoryStats(_ libvirt.Domain, _ uint32, _ uint32) 
([]libvirt.DomainMemoryStat, error) { + var stats []libvirt.DomainMemoryStat + for tag, val := range memStats { + stats = append(stats, libvirt.DomainMemoryStat{ + Tag: int32(tag), + Val: val, + }) + } + return stats, nil +} + +func bytesToKb(bytes uint64) uint64 { + return bytes / 1024 +} + +func TestLibvirtCollectMetrics(t *testing.T) { + lexporter := newLibvirtExporter(&libvirtMock{}, &teststatsd.Client{}) + + domainMetrics, err := lexporter.collect() + require.NoError(t, err) + + for _, dm := range domainMetrics { + for _, m := range dm.metrics { + tag, ok := nameToTag[m.name] + require.True(t, ok) + + if tag == libvirt.DomainMemoryStatMajorFault { + require.Equal(t, memStats[tag], m.value) + } else { + require.Equal(t, memStats[tag], bytesToKb(m.value)) + } + } + } +} +func TestLibvirtSubmitMetrics(t *testing.T) { + lexporter := newLibvirtExporter(&libvirtMock{}, &teststatsd.Client{}) + + domainMetrics, err := lexporter.collect() + require.NoError(t, err) + + err = lexporter.submit(domainMetrics) + require.NoError(t, err) + + for name, summary := range lexporter.statsdClient.(*teststatsd.Client).GetGaugeSummaries() { + statName := strings.TrimPrefix(name, kmtMicroVmsPrefix) + expectedVal := memStats[nameToTag[statName]] + if statName != "major_pagefault" { + expectedVal *= 1024 + } + + for _, call := range summary.Calls { + require.Equal(t, call.Value, float64(expectedVal)) + } + } +} diff --git a/test/new-e2e/tests/agent-metrics-logs/log-agent/k8s-logs/file_tailing_test.go b/test/new-e2e/tests/agent-metrics-logs/log-agent/k8s-logs/file_tailing_test.go index 958f2988e0ea9..bbf6d8ccf524e 100644 --- a/test/new-e2e/tests/agent-metrics-logs/log-agent/k8s-logs/file_tailing_test.go +++ b/test/new-e2e/tests/agent-metrics-logs/log-agent/k8s-logs/file_tailing_test.go @@ -7,8 +7,8 @@ package k8sfiletailing import ( "context" + _ "embed" "fmt" - "os" "testing" "time" @@ -61,18 +61,23 @@ func (v *k8sSuite) TestSingleLogAndMetadata() { } _, err = v.Env().KubernetesCluster.Client().BatchV1().Jobs("default").Create(context.TODO(), jobSpcec, metav1.CreateOptions{}) - assert.NoError(v.T(), err, "Could not properly start job") + require.NoError(v.T(), err, "Could not properly start job") v.EventuallyWithT(func(c *assert.CollectT) { logsServiceNames, err := v.Env().FakeIntake.Client().GetLogServiceNames() assert.NoError(c, err, "Error starting job") + if err != nil { + return + } if assert.Contains(c, logsServiceNames, "ubuntu", "Ubuntu service not found") { filteredLogs, err := v.Env().FakeIntake.Client().FilterLogs("ubuntu") assert.NoError(c, err, "Error filtering logs") - if assert.NotEmpty(v.T(), filteredLogs, "Fake Intake returned no logs even though log service name exists") { + if err != nil { + return + } + if assert.NotEmpty(c, filteredLogs, "Fake Intake returned no logs even though log service name exists") { assert.Equal(c, testLogMessage, filteredLogs[0].Message, "Test log doesn't match") - // Check container metatdata assert.Equal(c, filteredLogs[0].Service, "ubuntu", "Could not find service") assert.NotNil(c, filteredLogs[0].HostName, "Hostname not found") @@ -83,12 +88,13 @@ func (v *k8sSuite) TestSingleLogAndMetadata() { }, 1*time.Minute, 10*time.Second) } +//go:embed long_line_log.txt +var longLineLog string + func (v *k8sSuite) TestLongLogLine() { err := v.Env().FakeIntake.Client().FlushServerAndResetAggregators() require.NoError(v.T(), err, "Could not reset the FakeIntake") var backOffLimit int32 = 4 - file, err := os.ReadFile("long_line_log.txt") - assert.NoError(v.T(), err, "Could 
not open long line file.") jobSpcec := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ @@ -102,7 +108,7 @@ func (v *k8sSuite) TestLongLogLine() { { Name: "long-line-job", Image: "ubuntu", - Command: []string{"echo", string(file)}, + Command: []string{"echo", longLineLog}, }, }, RestartPolicy: corev1.RestartPolicyNever, @@ -113,17 +119,23 @@ func (v *k8sSuite) TestLongLogLine() { } _, err = v.Env().KubernetesCluster.Client().BatchV1().Jobs("default").Create(context.TODO(), jobSpcec, metav1.CreateOptions{}) - assert.NoError(v.T(), err, "Could not properly start job") + require.NoError(v.T(), err, "Could not properly start job") v.EventuallyWithT(func(c *assert.CollectT) { logsServiceNames, err := v.Env().FakeIntake.Client().GetLogServiceNames() assert.NoError(c, err, "Error starting job") + if err != nil { + return + } if assert.Contains(c, logsServiceNames, "ubuntu", "Ubuntu service not found") { filteredLogs, err := v.Env().FakeIntake.Client().FilterLogs("ubuntu") assert.NoError(c, err, "Error filtering logs") - if assert.NotEmpty(v.T(), filteredLogs, "Fake Intake returned no logs even though log service name exists") { - assert.Equal(c, string(file), fmt.Sprintf("%s%s", filteredLogs[0].Message, "\n"), "Test log doesn't match") + if err != nil { + return + } + if assert.NotEmpty(c, filteredLogs, "Fake Intake returned no logs even though log service name exists") { + assert.Equal(c, longLineLog, fmt.Sprintf("%s%s", filteredLogs[0].Message, "\n"), "Test log doesn't match") } } @@ -142,7 +154,7 @@ func (v *k8sSuite) TestContainerExclude() { }, } _, err = v.Env().KubernetesCluster.Client().CoreV1().Namespaces().Create(context.TODO(), namespace, metav1.CreateOptions{}) - assert.NoError(v.T(), err, "Could not create namespace") + require.NoError(v.T(), err, "Could not create namespace") var backOffLimit int32 = 4 testLogMessage := "Test log message here" @@ -170,11 +182,14 @@ func (v *k8sSuite) TestContainerExclude() { } _, err = v.Env().KubernetesCluster.Client().BatchV1().Jobs(namespaceName).Create(context.TODO(), jobSpcec, metav1.CreateOptions{}) - assert.NoError(v.T(), err, "Could not properly start job") + require.NoError(v.T(), err, "Could not properly start job") v.EventuallyWithT(func(c *assert.CollectT) { logsServiceNames, err := v.Env().FakeIntake.Client().GetLogServiceNames() assert.NoError(c, err, "Error starting job") + if err != nil { + return + } assert.NotContains(c, logsServiceNames, "alpine", "Alpine service found after excluded") }, 1*time.Minute, 10*time.Second) } diff --git a/test/new-e2e/tests/agent-metrics-logs/log-agent/linux-log/journald/journald_tailing_test.go b/test/new-e2e/tests/agent-metrics-logs/log-agent/linux-log/journald/journald_tailing_test.go index 23aaedb2f92bd..14b22696bf37f 100644 --- a/test/new-e2e/tests/agent-metrics-logs/log-agent/linux-log/journald/journald_tailing_test.go +++ b/test/new-e2e/tests/agent-metrics-logs/log-agent/linux-log/journald/journald_tailing_test.go @@ -82,8 +82,13 @@ func (s *LinuxJournaldFakeintakeSuite) journaldLogCollection() { _, err := s.Env().RemoteHost.Execute("sudo usermod -a -G systemd-journal dd-agent") require.NoErrorf(t, err, "Unable to adjust permissions for dd-agent user: %s", err) - // Restart agent - s.Env().RemoteHost.Execute("sudo systemctl restart datadog-agent") + // Restart agent and make sure it's ready before adding logs + _, err = s.Env().RemoteHost.Execute("sudo systemctl restart datadog-agent") + assert.NoErrorf(t, err, "Failed to restart the agent: %s", err) + s.EventuallyWithT(func(_ *assert.CollectT) { + 
agentReady := s.Env().Agent.Client.IsReady() + assert.True(t, agentReady) + }, 1*time.Minute, 5*time.Second, "Agent was not ready") // Generate log appendJournaldLog(s, "hello-world", 1) diff --git a/test/new-e2e/tests/agent-metrics-logs/log-agent/utils/file_tailing_utils.go b/test/new-e2e/tests/agent-metrics-logs/log-agent/utils/file_tailing_utils.go index 65eeaa1d19089..f8950e85c532a 100644 --- a/test/new-e2e/tests/agent-metrics-logs/log-agent/utils/file_tailing_utils.go +++ b/test/new-e2e/tests/agent-metrics-logs/log-agent/utils/file_tailing_utils.go @@ -117,9 +117,8 @@ func CheckLogFilePresence(ls LogsTestSuite, logFileName string) { } // FetchAndFilterLogs fetches logs from the fake intake server and filters them by service and content. -func FetchAndFilterLogs(t *testing.T, fakeIntake *components.FakeIntake, service, content string) ([]*aggregator.Log, error) { +func FetchAndFilterLogs(fakeIntake *components.FakeIntake, service, content string) ([]*aggregator.Log, error) { client := fakeIntake.Client() - t.Helper() names, err := client.GetLogServiceNames() if err != nil { @@ -154,7 +153,8 @@ func CheckLogsExpected(t *testing.T, fakeIntake *components.FakeIntake, service, t.Helper() assert.EventuallyWithT(t, func(c *assert.CollectT) { - logs, err := FetchAndFilterLogs(t, fakeIntake, service, content) + logs, err := FetchAndFilterLogs(fakeIntake, service, content) + if assert.NoErrorf(c, err, "Error fetching logs: %s", err) { intakeLog := logsToString(logs) if assert.NotEmpty(c, logs, "Expected logs with content: '%s' not found. Instead, found: %s", content, intakeLog) { @@ -171,11 +171,11 @@ func CheckLogsExpected(t *testing.T, fakeIntake *components.FakeIntake, service, // CheckLogsNotExpected verifies the absence of unexpected logs. func CheckLogsNotExpected(t *testing.T, fakeIntake *components.FakeIntake, service, content string) { t.Helper() - + t.Logf("Checking for logs from service: '%s' with content: '%s' are not collected", service, content) assert.EventuallyWithT(t, func(c *assert.CollectT) { - logs, err := FetchAndFilterLogs(t, fakeIntake, service, content) - intakeLog := logsToString(logs) + logs, err := FetchAndFilterLogs(fakeIntake, service, content) if assert.NoErrorf(c, err, "Error fetching logs: %s", err) { + intakeLog := logsToString(logs) if assert.Empty(c, logs, "Unexpected logs with content: '%s' found. 
Instead, found: %s", content, intakeLog) { t.Logf("No logs from service: '%s' with content: '%s' collected as expected", service, content) } diff --git a/test/new-e2e/tests/agent-metrics-logs/log-agent/windows-log/file-tailing/file_tailing_test.go b/test/new-e2e/tests/agent-metrics-logs/log-agent/windows-log/file-tailing/file_tailing_test.go index 2f13e89793c07..c2becb766729a 100644 --- a/test/new-e2e/tests/agent-metrics-logs/log-agent/windows-log/file-tailing/file_tailing_test.go +++ b/test/new-e2e/tests/agent-metrics-logs/log-agent/windows-log/file-tailing/file_tailing_test.go @@ -14,13 +14,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" + testos "github.com/DataDog/test-infra-definitions/components/os" + "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-metrics-logs/log-agent/utils" - "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" - testos "github.com/DataDog/test-infra-definitions/components/os" - "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" ) // WindowsFakeintakeSuite defines a test suite for the log agent interacting with a virtual machine and fake intake. @@ -137,17 +138,15 @@ func (s *WindowsFakeintakeSuite) testLogNoPermission() { assert.NoErrorf(t, err, "Unable to adjust permissions for the log file %s.", logFilePath) t.Logf("Read permissions revoked") - // Generate logs and check the intake for no new logs because of revoked permissions + // wait for agent to be ready after restart s.EventuallyWithT(func(c *assert.CollectT) { - agentReady := s.Env().Agent.Client.IsReady() - if assert.Truef(c, agentReady, "Agent is not ready after restart") { - // Generate log - utils.AppendLog(s, logFileName, "access-denied", 1) - // Check intake for new logs - utils.CheckLogsNotExpected(s.T(), s.Env().FakeIntake, "hello", "access-denied") - } + assert.Truef(c, s.Env().Agent.Client.IsReady(), "Agent is not ready after restart") }, 2*time.Minute, 5*time.Second) + // Generate logs and check the intake for no new logs because of revoked permissions + utils.AppendLog(s, logFileName, "access-denied", 1) + // Check intake for new logs + utils.CheckLogsNotExpected(s.T(), s.Env().FakeIntake, "hello", "access-denied") } func (s *WindowsFakeintakeSuite) testLogCollectionAfterPermission() { diff --git a/test/new-e2e/tests/agent-platform/common/agent_behaviour.go b/test/new-e2e/tests/agent-platform/common/agent_behaviour.go index e747cc7bf96c6..c12f44a6f0669 100644 --- a/test/new-e2e/tests/agent-platform/common/agent_behaviour.go +++ b/test/new-e2e/tests/agent-platform/common/agent_behaviour.go @@ -212,7 +212,7 @@ const ( ExpectedPythonVersion2 = "2.7.18" // ExpectedPythonVersion3 is the expected python 3 version // Bump this version when the version in omnibus/config/software/python3.rb changes - ExpectedPythonVersion3 = "3.11.8" + ExpectedPythonVersion3 = "3.12.6" ) // SetAgentPythonMajorVersion set the python major version in the agent config and restarts the agent diff --git a/test/new-e2e/tests/agent-platform/common/agent_install.go b/test/new-e2e/tests/agent-platform/common/agent_install.go index 47a941e100f89..b3d8eafb661c4 100644 --- 
a/test/new-e2e/tests/agent-platform/common/agent_install.go +++ b/test/new-e2e/tests/agent-platform/common/agent_install.go @@ -120,6 +120,6 @@ func CheckUninstallation(t *testing.T, client *TestClient) { installFolderPath := client.Helper.GetInstallFolder() entries, err := client.FileManager.ReadDir(installFolderPath) - require.Error(tt, err, "should not find anything in install folder, found %v dir entries ", len(entries)) + require.Error(tt, err, "should not find anything in install folder, found %v dir entries.\nContent: %+v ", len(entries), entries) }) } diff --git a/test/new-e2e/tests/agent-shared-components/config-refresh/non_core_agents_sync_win_test.go b/test/new-e2e/tests/agent-shared-components/config-refresh/non_core_agents_sync_win_test.go index 5499a802a5e65..7745859ee3e3d 100644 --- a/test/new-e2e/tests/agent-shared-components/config-refresh/non_core_agents_sync_win_test.go +++ b/test/new-e2e/tests/agent-shared-components/config-refresh/non_core_agents_sync_win_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/util/testutil/flake" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" @@ -29,6 +30,9 @@ type configRefreshWindowsSuite struct { } func TestConfigRefreshWindowsSuite(t *testing.T) { + // WINA-1014 + flake.Mark(t) + t.Parallel() e2e.Run(t, &configRefreshWindowsSuite{}, e2e.WithProvisioner(awshost.Provisioner(awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault))))) } diff --git a/test/new-e2e/tests/agent-shared-components/secret/secret_win_test.go b/test/new-e2e/tests/agent-shared-components/secret/secret_win_test.go index 0528334d553c0..53d9f4abe4836 100644 --- a/test/new-e2e/tests/agent-shared-components/secret/secret_win_test.go +++ b/test/new-e2e/tests/agent-shared-components/secret/secret_win_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/util/testutil/flake" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" secrets "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-shared-components/secretsutils" @@ -26,6 +27,9 @@ type windowsRuntimeSecretSuite struct { } func TestWindowsRuntimeSecretSuite(t *testing.T) { + // WINA-1014 + flake.Mark(t) + t.Parallel() e2e.Run(t, &windowsRuntimeSecretSuite{}, e2e.WithProvisioner(awshost.Provisioner( awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault)), diff --git a/test/new-e2e/tests/agent-subcommands/configcheck/configcheck_nix_test.go b/test/new-e2e/tests/agent-subcommands/configcheck/configcheck_nix_test.go index 74562bfff59eb..6f6e744e09bcb 100644 --- a/test/new-e2e/tests/agent-subcommands/configcheck/configcheck_nix_test.go +++ b/test/new-e2e/tests/agent-subcommands/configcheck/configcheck_nix_test.go @@ -25,7 +25,7 @@ func TestLinuxConfigCheckSuite(t *testing.T) { e2e.Run(t, &linuxConfigCheckSuite{}, e2e.WithProvisioner(awshost.ProvisionerNoFakeIntake())) } -// cpu, disk, file_handle, io, load, memory, network, ntp, uptime +// cpu, disk, file_handle, io, load, memory, network, ntp, uptime, service_discovery func (v *linuxConfigCheckSuite) TestDefaultInstalledChecks() { testChecks := []CheckConfigOutput{ { @@ -82,6 +82,12 @@ func (v *linuxConfigCheckSuite) 
TestDefaultInstalledChecks() { InstanceID: "uptime:", Settings: "{}", }, + { + CheckName: "service_discovery", + Filepath: "file:/etc/datadog-agent/conf.d/service_discovery.d/conf.yaml.default", + InstanceID: "service_discovery:", + Settings: "{}", + }, } output := v.Env().Agent.Client.ConfigCheck() diff --git a/test/new-e2e/tests/agent-subcommands/health/health_common_test.go b/test/new-e2e/tests/agent-subcommands/health/health_common_test.go new file mode 100644 index 0000000000000..572282b7e1875 --- /dev/null +++ b/test/new-e2e/tests/agent-subcommands/health/health_common_test.go @@ -0,0 +1,84 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package health + +import ( + "net/http" + "time" + + "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" + "github.com/DataDog/test-infra-definitions/components/os" + "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" + + "github.com/DataDog/datadog-agent/test/fakeintake/api" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" + + "github.com/cenkalti/backoff" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type baseHealthSuite struct { + e2e.BaseSuite[environments.Host] + descriptor os.Descriptor +} + +// section contains the content status of a specific section (e.g. Forwarder) +func (v *baseHealthSuite) TestDefaultInstallHealthy() { + interval := 1 * time.Second + + var output string + var err error + err = backoff.Retry(func() error { + output, err = v.Env().Agent.Client.Health() + if err != nil { + return err + } + return nil + }, backoff.WithMaxRetries(backoff.NewConstantBackOff(interval), uint64(15))) + + assert.NoError(v.T(), err) + assert.Contains(v.T(), output, "Agent health: PASS") +} + +func (v *baseHealthSuite) TestDefaultInstallUnhealthy() { + // the fakeintake says that any API key is invalid by sending a 403 code + override := api.ResponseOverride{ + Endpoint: "/api/v1/validate", + StatusCode: 403, + Method: http.MethodGet, + Body: []byte("invalid API key"), + } + err := v.Env().FakeIntake.Client().ConfigureOverride(override) + require.NoError(v.T(), err) + + // restart the agent, which validates the key using the fakeintake at startup + v.UpdateEnv(awshost.Provisioner( + awshost.WithEC2InstanceOptions(ec2.WithOS(v.descriptor)), + awshost.WithAgentOptions(agentparams.WithAgentConfig("log_level: info\nforwarder_apikey_validation_interval: 1")), + )) + + require.EventuallyWithT(v.T(), func(collect *assert.CollectT) { + // forwarder should be unhealthy because the key is invalid + _, err = v.Env().Agent.Client.Health() + assert.ErrorContains(collect, err, "Agent health: FAIL") + assert.ErrorContains(collect, err, "=== 1 unhealthy components ===\nforwarder") + }, time.Second*30, time.Second) + + // the fakeintake now says that the api key is valid + override.StatusCode = 200 + override.Body = []byte("valid API key") + err = v.Env().FakeIntake.Client().ConfigureOverride(override) + require.NoError(v.T(), err) + + // the agent will check every minute if the key is valid, so wait at most 1m30 + require.EventuallyWithT(v.T(), func(collect *assert.CollectT) { + _, err = v.Env().Agent.Client.Health() + 
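// once the fakeintake answers 200 for /api/v1/validate again (see the override flip above),
// the forwarder should recover and Health() should eventually stop returning an error,
// which is what this final assertion waits for.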
assert.NoError(collect, err) + }, time.Second*90, 3*time.Second) +} diff --git a/test/new-e2e/tests/agent-subcommands/health/health_commont_test.go b/test/new-e2e/tests/agent-subcommands/health/health_commont_test.go deleted file mode 100644 index 8a455a440d3c8..0000000000000 --- a/test/new-e2e/tests/agent-subcommands/health/health_commont_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package health - -import ( - "testing" - "time" - - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" - awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" - - "github.com/cenkalti/backoff" - "github.com/stretchr/testify/assert" -) - -type baseHealthSuite struct { - e2e.BaseSuite[environments.Host] -} - -func TestSubcommandSuite(t *testing.T) { - e2e.Run(t, &baseHealthSuite{}, e2e.WithProvisioner(awshost.Provisioner())) -} - -// section contains the content status of a specific section (e.g. Forwarder) -func (v *baseHealthSuite) TestDefaultInstallHealthy() { - interval := 1 * time.Second - - var output string - var err error - err = backoff.Retry(func() error { - output, err = v.Env().Agent.Client.Health() - if err != nil { - return err - } - return nil - }, backoff.WithMaxRetries(backoff.NewConstantBackOff(interval), uint64(15))) - - assert.NoError(v.T(), err) - assert.Contains(v.T(), output, "Agent health: PASS") -} diff --git a/test/new-e2e/tests/agent-subcommands/health/health_nix_test.go b/test/new-e2e/tests/agent-subcommands/health/health_nix_test.go index 15c51d4041354..4a064a2d88f93 100644 --- a/test/new-e2e/tests/agent-subcommands/health/health_nix_test.go +++ b/test/new-e2e/tests/agent-subcommands/health/health_nix_test.go @@ -8,10 +8,9 @@ package health import ( "testing" - "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" - "github.com/stretchr/testify/assert" + "github.com/DataDog/test-infra-definitions/components/os" + "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" - "github.com/DataDog/datadog-agent/test/fakeintake/api" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" ) @@ -22,29 +21,8 @@ type linuxHealthSuite struct { func TestLinuxHealthSuite(t *testing.T) { t.Parallel() - e2e.Run(t, &linuxHealthSuite{}, e2e.WithProvisioner(awshost.Provisioner())) -} - -func (v *linuxHealthSuite) TestDefaultInstallUnhealthy() { - // the fakeintake says that any API key is invalid by sending a 403 code - override := api.ResponseOverride{ - Endpoint: "/api/v1/validate", - StatusCode: 403, - ContentType: "text/plain", - Body: []byte("invalid API key"), - } - v.Env().FakeIntake.Client().ConfigureOverride(override) - - // restart the agent, which validates the key using the fakeintake at startup - v.UpdateEnv(awshost.Provisioner( - awshost.WithAgentOptions(agentparams.WithAgentConfig("log_level: info\n")), - )) - - // agent should be unhealthy because the key is invalid - _, err := v.Env().Agent.Client.Health() - if err == nil { - assert.Fail(v.T(), "agent expected to be unhealthy, but no error found!") - return - } - assert.Contains(v.T(), err.Error(), "Agent health: FAIL") + suite := &linuxHealthSuite{baseHealthSuite{descriptor: 
os.UbuntuDefault}} + e2e.Run(t, suite, e2e.WithProvisioner(awshost.Provisioner( + awshost.WithEC2InstanceOptions(ec2.WithOS(suite.descriptor)), + ))) } diff --git a/test/new-e2e/tests/agent-subcommands/health/health_win_test.go b/test/new-e2e/tests/agent-subcommands/health/health_win_test.go index e2bf2f531ca7a..84da19fdcadc5 100644 --- a/test/new-e2e/tests/agent-subcommands/health/health_win_test.go +++ b/test/new-e2e/tests/agent-subcommands/health/health_win_test.go @@ -8,12 +8,9 @@ package health import ( "testing" - "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" "github.com/DataDog/test-infra-definitions/components/os" "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" - "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/test/fakeintake/api" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" ) @@ -24,28 +21,8 @@ type windowsHealthSuite struct { func TestWindowsHealthSuite(t *testing.T) { t.Parallel() - e2e.Run(t, &windowsHealthSuite{}, e2e.WithProvisioner(awshost.Provisioner(awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault))))) -} - -func (v *windowsHealthSuite) TestDefaultInstallUnhealthy() { - v.T().Skip("FIXME: test is flaky") - // the fakeintake says that any API key is invalid by sending a 403 code - override := api.ResponseOverride{ - Endpoint: "/api/v1/validate", - StatusCode: 403, - ContentType: "text/plain", - Body: []byte("invalid API key"), - } - v.Env().FakeIntake.Client().ConfigureOverride(override) - // restart the agent, which validates the key using the fakeintake at startup - v.UpdateEnv(awshost.Provisioner(awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault)), - awshost.WithAgentOptions(agentparams.WithAgentConfig("log_level: info\n")))) - - // agent should be unhealthy because the key is invalid - _, err := v.Env().Agent.Client.Health() - if err == nil { - assert.Fail(v.T(), "agent expected to be unhealthy, but no error found!") - return - } - assert.Contains(v.T(), err.Error(), "Agent health: FAIL") + suite := &windowsHealthSuite{baseHealthSuite{descriptor: os.WindowsDefault}} + e2e.Run(t, suite, e2e.WithProvisioner(awshost.Provisioner( + awshost.WithEC2InstanceOptions(ec2.WithOS(suite.descriptor)), + ))) } diff --git a/test/new-e2e/tests/agent-subcommands/hostname/hostname_azure_nix_test.go b/test/new-e2e/tests/agent-subcommands/hostname/hostname_azure_nix_test.go new file mode 100644 index 0000000000000..7ddd73a1b1bdc --- /dev/null +++ b/test/new-e2e/tests/agent-subcommands/hostname/hostname_azure_nix_test.go @@ -0,0 +1,84 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package hostname + +import ( + "encoding/json" + "fmt" + "strings" + "testing" + + "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + azurehost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/azure/host/linux" +) + +type linuxAzureHostnameSuite struct { + e2e.BaseSuite[environments.Host] +} + +func TestLinuxAzureHostnameSuite(t *testing.T) { + t.Parallel() + e2e.Run(t, &linuxAzureHostnameSuite{}, e2e.WithProvisioner(azurehost.ProvisionerNoFakeIntake())) +} + +func (v *linuxAzureHostnameSuite) TestAgentConfigHostnameFileOverride() { + fileContent := "hostname.from.file" + v.UpdateEnv(azurehost.ProvisionerNoFakeIntake(azurehost.WithAgentOptions(agentparams.WithFile("/tmp/var/hostname", fileContent, false), agentparams.WithAgentConfig("hostname_file: /tmp/var/hostname")))) + + hostname := v.Env().Agent.Client.Hostname() + assert.Equal(v.T(), fileContent, hostname) +} + +// https://github.com/DataDog/datadog-agent/blob/main/pkg/util/hostname/README.md#the-current-logic +// azure specific logic from pkg/util/cloudproviders/azure/azure.go +func (v *linuxAzureHostnameSuite) TestAgentHostnameStyle() { + hostname := v.Env().RemoteHost.MustExecute("hostname") + hostname = strings.TrimSpace(hostname) + + metadataStr := v.Env().RemoteHost.MustExecute(`curl -s -H "Metadata: true" http://169.254.169.254/metadata/instance/compute?api-version=2021-02-01`) + + var metadata struct { + VMID string + Name string + ResourceGroupName string + SubscriptionID string + OsProfile struct { + ComputerName string + } + } + + err := json.Unmarshal([]byte(metadataStr), &metadata) + require.NoError(v.T(), err) + + hostnameStyles := map[string]interface{}{ + "": hostname, + "os": hostname, + "vmid": metadata.VMID, + "name": metadata.Name, + "name_and_resource_group": fmt.Sprintf("%s.%s", metadata.Name, metadata.ResourceGroupName), + "full": fmt.Sprintf("%s.%s.%s", metadata.Name, metadata.ResourceGroupName, metadata.SubscriptionID), + "os_computer_name": strings.ToLower(metadata.OsProfile.ComputerName), + } + + for hostnameStyle, expected := range hostnameStyles { + v.Run("hostname_style_"+hostnameStyle, func() { + agentConfig := "" + if hostnameStyle != "" { + agentConfig = "azure_hostname_style: " + hostnameStyle + } + + v.UpdateEnv(azurehost.ProvisionerNoFakeIntake(azurehost.WithAgentOptions(agentparams.WithAgentConfig(agentConfig)))) + + hostname := v.Env().Agent.Client.Hostname() + v.Equal(expected, hostname) + }) + } +} diff --git a/test/new-e2e/tests/agent-subcommands/hostname/hostname_ec2_win_test.go b/test/new-e2e/tests/agent-subcommands/hostname/hostname_ec2_win_test.go index 3429569e29e7a..8f527460691ad 100644 --- a/test/new-e2e/tests/agent-subcommands/hostname/hostname_ec2_win_test.go +++ b/test/new-e2e/tests/agent-subcommands/hostname/hostname_ec2_win_test.go @@ -13,6 +13,7 @@ import ( "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" "github.com/stretchr/testify/assert" + "github.com/DataDog/datadog-agent/pkg/util/testutil/flake" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" @@ -23,6 +24,9 @@ type windowsHostnameSuite struct { } func TestWindowsHostnameSuite(t *testing.T) { + // WINA-1014 + 
flake.Mark(t) + t.Parallel() osOption := awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault)) e2e.Run(t, &windowsHostnameSuite{baseHostnameSuite: baseHostnameSuite{osOption: osOption}}, e2e.WithProvisioner(awshost.ProvisionerNoFakeIntake(osOption))) diff --git a/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go b/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go index d4e099410dda6..460c8bd4d88ca 100644 --- a/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go +++ b/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/util/testutil/flake" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclient" @@ -27,6 +28,8 @@ type windowsSecretSuite struct { } func TestWindowsSecretSuite(t *testing.T) { + // WINA-1014 + flake.Mark(t) t.Parallel() e2e.Run(t, &windowsSecretSuite{}, e2e.WithProvisioner(awshost.Provisioner(awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault))))) } diff --git a/test/new-e2e/tests/agent-subcommands/status/fixtures/custom_check.py b/test/new-e2e/tests/agent-subcommands/status/fixtures/custom_check.py new file mode 100644 index 0000000000000..e1883ad2d2135 --- /dev/null +++ b/test/new-e2e/tests/agent-subcommands/status/fixtures/custom_check.py @@ -0,0 +1,6 @@ +from checks import AgentCheck + + +class HelloCheck(AgentCheck): + def check(self, instance): + self.set_metadata('custom_metadata_key', 'custom_metadata_value') diff --git a/test/new-e2e/tests/agent-subcommands/status/fixtures/custom_check.yaml b/test/new-e2e/tests/agent-subcommands/status/fixtures/custom_check.yaml new file mode 100644 index 0000000000000..015bfa90bf496 --- /dev/null +++ b/test/new-e2e/tests/agent-subcommands/status/fixtures/custom_check.yaml @@ -0,0 +1,3 @@ +init_config: +instances: + [{}] \ No newline at end of file diff --git a/test/new-e2e/tests/agent-subcommands/status/status_common_test.go b/test/new-e2e/tests/agent-subcommands/status/status_common_test.go index 6ee104d659ad7..669e0ff7ec163 100644 --- a/test/new-e2e/tests/agent-subcommands/status/status_common_test.go +++ b/test/new-e2e/tests/agent-subcommands/status/status_common_test.go @@ -6,6 +6,7 @@ package status import ( + _ "embed" "fmt" "regexp" "time" @@ -17,6 +18,12 @@ import ( "github.com/stretchr/testify/require" ) +//go:embed fixtures/custom_check.yaml +var customCheckYaml []byte + +//go:embed fixtures/custom_check.py +var customCheckPython []byte + type baseStatusSuite struct { e2e.BaseSuite[environments.Host] } @@ -78,6 +85,18 @@ func verifySectionContent(t require.TestingT, statusOutput string, section expec } } +// fetchAndCheckStatus executes the Agent status subcommand and compares its output with the provided expectedSections via verifySectionContent +func fetchAndCheckStatus(v *baseStatusSuite, expectedSections []expectedSection) { + // the test will not run until the core-agent is running, but it can run before the process-agent or trace-agent are running + require.EventuallyWithT(v.T(), func(t *assert.CollectT) { + statusOutput := v.Env().Agent.Client.Status() + + for _, section := range expectedSections { + verifySectionContent(t, statusOutput.Content, section) + } + }, 2*time.Minute, 20*time.Second) +} + func (v *baseStatusSuite) TestDefaultInstallStatus() {
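// reading aid for the entries built below (inferred from how expectedSection is used in this package,
// not from its definition): name appears to be matched as a regular expression against section headers
// (e.g. `Agent \(.*\)` in status_nix_test.go), shouldBePresent toggles whether the section must exist,
// and shouldContain lists substrings the section body must include.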
expectedSections := []expectedSection{ { @@ -181,12 +200,5 @@ func (v *baseStatusSuite) TestDefaultInstallStatus() { }, } - // the test will not run until the core-agent is running, but it can run before the process-agent or trace-agent are running - require.EventuallyWithT(v.T(), func(t *assert.CollectT) { - status := v.Env().Agent.Client.Status() - - for _, section := range expectedSections { - verifySectionContent(t, status.Content, section) - } - }, 2*time.Minute, 20*time.Second) + fetchAndCheckStatus(v, expectedSections) } diff --git a/test/new-e2e/tests/agent-subcommands/status/status_nix_test.go b/test/new-e2e/tests/agent-subcommands/status/status_nix_test.go index c531e88af0ea4..ab805b2d90db7 100644 --- a/test/new-e2e/tests/agent-subcommands/status/status_nix_test.go +++ b/test/new-e2e/tests/agent-subcommands/status/status_nix_test.go @@ -29,25 +29,48 @@ func (v *linuxStatusSuite) TestStatusHostname() { metadata := client.NewEC2Metadata(v.T(), v.Env().RemoteHost.Host, v.Env().RemoteHost.OSFamily) resourceID := metadata.Get("instance-id") - status := v.Env().Agent.Client.Status() - - expected := expectedSection{ - name: `Hostname`, - shouldBePresent: true, - shouldContain: []string{fmt.Sprintf("hostname: %v", resourceID), "hostname provider: aws"}, + expectedSections := []expectedSection{ + { + name: `Hostname`, + shouldBePresent: true, + shouldContain: []string{fmt.Sprintf("hostname: %v", resourceID), "hostname provider: aws"}, + }, } - verifySectionContent(v.T(), status.Content, expected) + fetchAndCheckStatus(&v.baseStatusSuite, expectedSections) } func (v *linuxStatusSuite) TestFIPSProxyStatus() { v.UpdateEnv(awshost.ProvisionerNoFakeIntake(awshost.WithAgentOptions(agentparams.WithAgentConfig("fips.enabled: true")))) - expectedSection := expectedSection{ - name: `Agent \(.*\)`, - shouldBePresent: true, - shouldContain: []string{"FIPS proxy"}, + expectedSections := []expectedSection{ + { + name: `Agent \(.*\)`, + shouldBePresent: true, + shouldContain: []string{"FIPS proxy"}, + }, } - status := v.Env().Agent.Client.Status() - verifySectionContent(v.T(), status.Content, expectedSection) + + fetchAndCheckStatus(&v.baseStatusSuite, expectedSections) +} + +// This test asserts the presence of metadata sent by Python checks in the status subcommand output. 
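// The fixture check (fixtures/custom_check.py) calls self.set_metadata('custom_metadata_key', 'custom_metadata_value'),
// so the Collector section is expected to show the check instance as [OK] with a metadata block containing
// "custom_metadata_key: custom_metadata_value" (see the shouldContain entries below).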
+func (v *linuxStatusSuite) TestChecksMetadataUnix() { + v.UpdateEnv(awshost.ProvisionerNoFakeIntake(awshost.WithAgentOptions( + agentparams.WithFile("/etc/datadog-agent/conf.d/custom_check.yaml", string(customCheckYaml), true), + agentparams.WithFile("/etc/datadog-agent/checks.d/custom_check.py", string(customCheckPython), true), + ))) + + expectedSections := []expectedSection{ + { + name: "Collector", + shouldBePresent: true, + shouldContain: []string{"Instance ID:", "[OK]", + "metadata:", + "custom_metadata_key: custom_metadata_value", + }, + }, + } + + fetchAndCheckStatus(&v.baseStatusSuite, expectedSections) } diff --git a/test/new-e2e/tests/agent-subcommands/status/status_win_test.go b/test/new-e2e/tests/agent-subcommands/status/status_win_test.go index edb8ebc34233c..eef19d962c363 100644 --- a/test/new-e2e/tests/agent-subcommands/status/status_win_test.go +++ b/test/new-e2e/tests/agent-subcommands/status/status_win_test.go @@ -9,6 +9,7 @@ import ( "fmt" "testing" + "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" "github.com/DataDog/test-infra-definitions/components/os" "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" @@ -30,13 +31,37 @@ func (v *windowsStatusSuite) TestStatusHostname() { metadata := client.NewEC2Metadata(v.T(), v.Env().RemoteHost.Host, v.Env().RemoteHost.OSFamily) resourceID := metadata.Get("instance-id") - status := v.Env().Agent.Client.Status() + expectedSections := []expectedSection{ + { + name: `Hostname`, + shouldBePresent: true, + shouldContain: []string{fmt.Sprintf("instance-id: %v", resourceID), "hostname provider: os"}, + }, + } + + fetchAndCheckStatus(&v.baseStatusSuite, expectedSections) +} + +// This test asserts the presence of metadata sent by Python checks in the status subcommand output. 
+func (v *windowsStatusSuite) TestChecksMetadataWindows() { + v.UpdateEnv(awshost.ProvisionerNoFakeIntake( + awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault)), + awshost.WithAgentOptions( + agentparams.WithFile("C:/ProgramData/Datadog/conf.d/custom_check.d/conf.yaml", string(customCheckYaml), true), + agentparams.WithFile("C:/ProgramData/Datadog/checks.d/custom_check.py", string(customCheckPython), true), + ))) - expected := expectedSection{ - name: `Hostname`, - shouldBePresent: true, - shouldContain: []string{fmt.Sprintf("instance-id: %v", resourceID), "hostname provider: os"}, + expectedSections := []expectedSection{ + { + name: "Collector", + shouldBePresent: true, + shouldContain: []string{"Instance ID:", "[OK]", + // Following lines check the presence of checks metadata + "metadata:", + "custom_metadata_key: custom_metadata_value", + }, + }, } - verifySectionContent(v.T(), status.Content, expected) + fetchAndCheckStatus(&v.baseStatusSuite, expectedSections) } diff --git a/test/new-e2e/tests/containers/ecs_test.go b/test/new-e2e/tests/containers/ecs_test.go index 38974e31821d3..55c4dd15f12b4 100644 --- a/test/new-e2e/tests/containers/ecs_test.go +++ b/test/new-e2e/tests/containers/ecs_test.go @@ -207,7 +207,8 @@ func (suite *ecsSuite) TestNginxECS() { `^ecs_cluster_name:` + regexp.QuoteMeta(suite.ecsClusterName) + `$`, `^ecs_container_name:nginx$`, `^ecs_launch_type:ec2$`, - `^git.commit.sha:`, // org.opencontainers.image.revision docker image label + `^ecs_service:` + regexp.QuoteMeta(strings.TrimSuffix(suite.ecsClusterName, "-ecs")) + `-nginx-ec2$`, + `^git.commit.sha:`, // org.opencontainers.image.revision docker image label `^git.repository_url:https://github.com/DataDog/test-infra-definitions$`, // org.opencontainers.image.source docker image label `^image_id:sha256:`, `^image_name:ghcr.io/datadog/apps-nginx-server$`, @@ -237,7 +238,8 @@ func (suite *ecsSuite) TestNginxECS() { `^ecs_cluster_name:` + regexp.QuoteMeta(suite.ecsClusterName) + `$`, `^ecs_container_name:nginx$`, `^ecs_launch_type:ec2$`, - `^git.commit.sha:`, // org.opencontainers.image.revision docker image label + `^ecs_service:` + regexp.QuoteMeta(strings.TrimSuffix(suite.ecsClusterName, "-ecs")) + `-nginx-ec2$`, + `^git.commit.sha:`, // org.opencontainers.image.revision docker image label `^git.repository_url:https://github.com/DataDog/test-infra-definitions$`, // org.opencontainers.image.source docker image label `^image_id:sha256:`, `^image_name:ghcr.io/datadog/apps-nginx-server$`, @@ -269,6 +271,7 @@ func (suite *ecsSuite) TestRedisECS() { `^docker_image:public.ecr.aws/docker/library/redis:latest$`, `^ecs_cluster_name:` + regexp.QuoteMeta(suite.ecsClusterName) + `$`, `^ecs_container_name:redis$`, + `^ecs_service:` + regexp.QuoteMeta(strings.TrimSuffix(suite.ecsClusterName, "-ecs")) + `-redis-ec2$`, `^ecs_launch_type:ec2$`, `^image_id:sha256:`, `^image_name:public.ecr.aws/docker/library/redis$`, @@ -297,6 +300,7 @@ func (suite *ecsSuite) TestRedisECS() { `^ecs_cluster_name:` + regexp.QuoteMeta(suite.ecsClusterName) + `$`, `^ecs_container_name:redis$`, `^ecs_launch_type:ec2$`, + `^ecs_service:` + regexp.QuoteMeta(strings.TrimSuffix(suite.ecsClusterName, "-ecs")) + `-redis-ec2$`, `^image_id:sha256:`, `^image_name:public.ecr.aws/docker/library/redis$`, `^image_tag:latest$`, @@ -462,6 +466,7 @@ func (suite *ecsSuite) TestCPU() { `^docker_image:ghcr.io/colinianking/stress-ng:409201de7458c639c68088d28ec8270ef599fe47$`, `^ecs_cluster_name:` + regexp.QuoteMeta(suite.ecsClusterName) + `$`, 
`^ecs_container_name:stress-ng$`, + `^ecs_service:` + regexp.QuoteMeta(strings.TrimSuffix(suite.ecsClusterName, "-ecs")) + `-stress-ng$`, `^git.commit.sha:`, `^git.repository_url:https://github.com/ColinIanKing/stress-ng$`, `^image_id:sha256:`, @@ -506,7 +511,8 @@ func (suite *ecsSuite) testDogstatsd(taskName string) { `^docker_image:ghcr.io/datadog/apps-dogstatsd:main$`, `^ecs_cluster_name:` + regexp.QuoteMeta(suite.ecsClusterName) + `$`, `^ecs_container_name:dogstatsd$`, - `^git.commit.sha:`, // org.opencontainers.image.revision docker image label + `^ecs_service:` + regexp.QuoteMeta(strings.TrimSuffix(suite.ecsClusterName, "-ecs")) + `-dogstatsd-ud[ps]$`, + `^git.commit.sha:`, // org.opencontainers.image.revision docker image label `^git.repository_url:https://github.com/DataDog/test-infra-definitions$`, // org.opencontainers.image.source docker image label `^image_id:sha256:`, `^image_name:ghcr.io/datadog/apps-dogstatsd$`, @@ -536,8 +542,9 @@ func (suite *ecsSuite) TestPrometheus() { `^docker_image:ghcr.io/datadog/apps-prometheus:main$`, `^ecs_cluster_name:` + regexp.QuoteMeta(suite.ecsClusterName) + `$`, `^ecs_container_name:prometheus$`, + `^ecs_service:` + regexp.QuoteMeta(strings.TrimSuffix(suite.ecsClusterName, "-ecs")) + `-prometheus$`, `^endpoint:http://.*:8080/metrics$`, - `^git.commit.sha:`, // org.opencontainers.image.revision docker image label + `^git.commit.sha:`, // org.opencontainers.image.revision docker image label `^git.repository_url:https://github.com/DataDog/test-infra-definitions$`, // org.opencontainers.image.source docker image label `^image_id:sha256:`, `^image_name:ghcr.io/datadog/apps-prometheus$`, diff --git a/test/new-e2e/tests/containers/k8s_test.go b/test/new-e2e/tests/containers/k8s_test.go index af2f9917a2f9e..2badd72b23401 100644 --- a/test/new-e2e/tests/containers/k8s_test.go +++ b/test/new-e2e/tests/containers/k8s_test.go @@ -929,8 +929,16 @@ func (suite *k8sSuite) testAdmissionControllerPod(namespace string, name string, }, 5*time.Minute, 10*time.Second, "The deployment with name %s in namespace %s does not exist or does not have the auto detected languages annotation", name, namespace) } + // Record old pod, so we can be sure we are not looking at the incorrect one after deletion + oldPods, err := suite.K8sClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector("app", name).String(), + }) + suite.Require().NoError(err) + suite.Require().Len(oldPods.Items, 1) + oldPod := oldPods.Items[0] + // Delete the pod to ensure it is recreated after the admission controller is deployed - err := suite.K8sClient.CoreV1().Pods(namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{ + err = suite.K8sClient.CoreV1().Pods(namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", name).String(), }) suite.Require().NoError(err) @@ -948,6 +956,9 @@ func (suite *k8sSuite) testAdmissionControllerPod(namespace string, name string, return } pod = pods.Items[0] + if !assert.NotEqual(c, oldPod.Name, pod.Name) { + return + } }, 2*time.Minute, 10*time.Second, "Failed to witness the creation of pod with name %s in namespace %s", name, namespace) suite.Require().Len(pod.Spec.Containers, 1) diff --git a/test/new-e2e/tests/cspm/cspm_test.go b/test/new-e2e/tests/cspm/cspm_test.go new file mode 100644 index 0000000000000..2c7a03f8b67b1 --- /dev/null +++ b/test/new-e2e/tests/cspm/cspm_test.go @@ -0,0 +1,303 @@ +// Unless explicitly stated 
otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package cspm contains the e2e tests for cspm +package cspm + +import ( + "context" + _ "embed" + "encoding/json" + "fmt" + "slices" + "testing" + "time" + + "k8s.io/apimachinery/pkg/fields" + + "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + awskubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/kubernetes" +) + +type cspmTestSuite struct { + e2e.BaseSuite[environments.Kubernetes] +} + +type findings = map[string][]map[string]string + +var expectedFindingsMasterEtcdNode = findings{ + "cis-kubernetes-1.5.1-1.1.12": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.16": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.19": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.21": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.22": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.23": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.24": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.25": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.26": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.33": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.6": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.3.2": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.3.3": []map[string]string{ + { + "result": "passed", + }, + }, + "cis-kubernetes-1.5.1-1.3.4": []map[string]string{ + { + "result": "passed", + }, + }, + "cis-kubernetes-1.5.1-1.3.5": []map[string]string{ + { + "result": "passed", + }, + }, + "cis-kubernetes-1.5.1-1.3.6": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.3.7": []map[string]string{ + { + "result": "passed", + }, + }, + "cis-kubernetes-1.5.1-1.4.1": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-3.2.1": []map[string]string{ + { + "result": "failed", + }, + }, +} +var expectedFindingsWorkerNode = findings{ + "cis-kubernetes-1.5.1-4.2.1": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-4.2.3": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-4.2.4": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-4.2.5": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-4.2.6": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-4.2.10": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-4.2.12": []map[string]string{ + { + "result": "failed", + }, + }, +} + +//go:embed values.yaml +var values string + +func TestCSPM(t *testing.T) { + e2e.Run(t, &cspmTestSuite{}, 
e2e.WithProvisioner(awskubernetes.KindProvisioner(awskubernetes.WithAgentOptions(kubernetesagentparams.WithHelmValues(values), kubernetesagentparams.WithoutDualShipping())))) +} + +func (s *cspmTestSuite) TestFindings() { + res, err := s.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(context.Background(), metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector("app", s.Env().Agent.LinuxNodeAgent.LabelSelectors["app"]).String(), + }) + require.NoError(s.T(), err) + require.Len(s.T(), res.Items, 1) + agentPod := res.Items[0] + _, _, err = s.Env().KubernetesCluster.KubernetesClient.PodExec("datadog", agentPod.Name, "security-agent", []string{"security-agent", "compliance", "check", "--dump-reports", "/tmp/reports", "--report"}) + require.NoError(s.T(), err) + dumpContent, _, err := s.Env().KubernetesCluster.KubernetesClient.PodExec("datadog", agentPod.Name, "security-agent", []string{"cat", "/tmp/reports"}) + require.NoError(s.T(), err) + findings, err := parseFindingOutput(dumpContent) + require.NoError(s.T(), err) + s.checkFindings(findings, mergeFindings(expectedFindingsMasterEtcdNode, expectedFindingsWorkerNode)) +} + +func (s *cspmTestSuite) TestMetrics() { + s.T().Log("Waiting for datadog.security_agent.compliance.running metrics") + assert.EventuallyWithT(s.T(), func(c *assert.CollectT) { + + metrics, err := s.Env().FakeIntake.Client().FilterMetrics("datadog.security_agent.compliance.running") + if !assert.NoError(c, err) { + return + } + if assert.NotEmpty(c, metrics) { + s.T().Log("Metrics found: datadog.security_agent.compliance.running") + } + }, 2*time.Minute, 10*time.Second) + + s.T().Log("Waiting for datadog.security_agent.compliance.containers_running metrics") + assert.EventuallyWithT(s.T(), func(c *assert.CollectT) { + metrics, err := s.Env().FakeIntake.Client().FilterMetrics("datadog.security_agent.compliance.containers_running") + if !assert.NoError(c, err) { + return + } + if assert.NotEmpty(c, metrics) { + s.T().Log("Metrics found: datadog.security_agent.compliance.containers_running") + } + }, 2*time.Minute, 10*time.Second) + +} +func (s *cspmTestSuite) checkFindings(findings, expectedFindings findings) { + s.T().Helper() + checkedRule := []string{} + for expectedRule, expectedRuleFindinds := range expectedFindings { + assert.Contains(s.T(), findings, expectedRule) + for _, expectedFinding := range expectedRuleFindinds { + found := false + for _, finding := range findings[expectedRule] { + if isSubset(expectedFinding, finding) { + found = true + break + } + } + assert.Truef(s.T(), found, "unexpected finding %v for rule %s", findings[expectedRule], expectedRule) + checkedRule = append(checkedRule, expectedRule) + } + } + for rule, ruleFindings := range findings { + if slices.Contains(checkedRule, rule) { + continue + } + for _, ruleFinding := range ruleFindings { + fmt.Printf("rule %s finding %v\n", rule, ruleFinding["result"]) + } + } + for rule, ruleFindings := range findings { + if slices.Contains(checkedRule, rule) { + continue + } + for _, ruleFinding := range ruleFindings { + assert.NotContains(s.T(), []string{"failed", "error"}, ruleFinding["result"], fmt.Sprintf("finding for rule %s not expected to be in failed or error state", rule)) + } + } + +} + +func isSubset(a, b map[string]string) bool { + for k, v := range a { + if vb, found := b[k]; !found || vb != v { + return false + } + } + return true +} + +func mergeFindings(a, b findings) findings { + for k, v := range b { + a[k] = v + } + return a +} + +func parseFindingOutput(output 
string) (findings, error) { + + result := map[string]any{} + parsedResult := findings{} + err := json.Unmarshal([]byte(output), &result) + if err != nil { + return nil, err + } + for rule, ruleFindings := range result { + ruleFindingsCasted, ok := ruleFindings.([]any) + if !ok { + return nil, fmt.Errorf("failed to parse output: %s for rule %s cannot be casted into []any", ruleFindings, rule) + } + parsedRuleFinding := []map[string]string{} + for _, finding := range ruleFindingsCasted { + findingCasted, ok := finding.(map[string]any) + if !ok { + return nil, fmt.Errorf("failed to parse output: %s for rule %s cannot be casted into map[string]any", finding, rule) + } + parsedFinding := map[string]string{} + for k, v := range findingCasted { + if _, ok := v.(string); ok { + parsedFinding[k] = v.(string) + } + } + parsedRuleFinding = append(parsedRuleFinding, parsedFinding) + + } + parsedResult[rule] = parsedRuleFinding + + } + return parsedResult, nil +} diff --git a/test/new-e2e/tests/cspm/values.yaml b/test/new-e2e/tests/cspm/values.yaml new file mode 100644 index 0000000000000..4a9aff86ca347 --- /dev/null +++ b/test/new-e2e/tests/cspm/values.yaml @@ -0,0 +1,4 @@ +datadog: + securityAgent: + compliance: + enabled: true diff --git a/test/new-e2e/tests/discovery/linux_test.go b/test/new-e2e/tests/discovery/linux_test.go index fe0f98a38d142..d48cc30962b7c 100644 --- a/test/new-e2e/tests/discovery/linux_test.go +++ b/test/new-e2e/tests/discovery/linux_test.go @@ -29,9 +29,6 @@ var agentConfigStr string //go:embed testdata/config/system_probe_config.yaml var systemProbeConfigStr string -//go:embed testdata/config/check_config.yaml -var checkConfigStr string - type linuxTestSuite struct { e2e.BaseSuite[environments.Host] } @@ -42,7 +39,6 @@ func TestLinuxTestSuite(t *testing.T) { agentParams := []func(*agentparams.Params) error{ agentparams.WithAgentConfig(agentConfigStr), agentparams.WithSystemProbeConfig(systemProbeConfigStr), - agentparams.WithFile("/etc/datadog-agent/conf.d/service_discovery.d/conf.yaml", checkConfigStr, true), } options := []e2e.SuiteOption{ e2e.WithProvisioner(awshost.Provisioner(awshost.WithAgentOptions(agentParams...))), diff --git a/test/new-e2e/tests/discovery/testdata/config/check_config.yaml b/test/new-e2e/tests/discovery/testdata/config/check_config.yaml deleted file mode 100644 index acab3a6421cab..0000000000000 --- a/test/new-e2e/tests/discovery/testdata/config/check_config.yaml +++ /dev/null @@ -1 +0,0 @@ -instances: [{}] diff --git a/test/new-e2e/tests/installer/host/fixtures/run_http_server.sh b/test/new-e2e/tests/installer/host/fixtures/run_http_server.sh index 3055ed5f5927a..66b2ca5290cbb 100755 --- a/test/new-e2e/tests/installer/host/fixtures/run_http_server.sh +++ b/test/new-e2e/tests/installer/host/fixtures/run_http_server.sh @@ -8,6 +8,4 @@ python3 /opt/fixtures/http_server.py >/tmp/server.log 2>&1 & PID=$! disown $PID -while ! curl -s http://localhost:8080 > /dev/null; do - sleep 1 -done +timeout 30s bash -c 'while ! 
curl -s http://localhost:8080 > /dev/null; do sleep 1; done' diff --git a/test/new-e2e/tests/installer/host/host.go b/test/new-e2e/tests/installer/host/host.go index a19ab6b39d12a..c8c944d0f40f1 100644 --- a/test/new-e2e/tests/installer/host/host.go +++ b/test/new-e2e/tests/installer/host/host.go @@ -7,6 +7,7 @@ package host import ( + "encoding/json" "fmt" "io/fs" "os/user" @@ -14,12 +15,14 @@ import ( "sort" "strconv" "strings" + "sync" "testing" "time" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" e2eos "github.com/DataDog/test-infra-definitions/components/os" + "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -688,3 +691,129 @@ func (s *State) AssertUnitsDead(names ...string) { assert.Equal(s.t, Dead, unit.SubState, "unit %v is not running", name) } } + +// LocalCDN is a local CDN for testing. +type LocalCDN struct { + host *Host + // DirPath is the path to the local CDN directory. + DirPath string + lock sync.Mutex +} + +type orderConfig struct { + // Order is the order of the layers. + Order []string `json:"order"` +} + +// NewLocalCDN creates a new local CDN. +func NewLocalCDN(host *Host) *LocalCDN { + localCDNPath := fmt.Sprintf("/tmp/local_cdn/%s", uuid.New().String()) + host.remote.MustExecute(fmt.Sprintf("mkdir -p %s", localCDNPath)) + + // Create order file + orderPath := filepath.Join(localCDNPath, "configuration_order") + orderContent := orderConfig{ + Order: []string{}, + } + orderBytes, err := json.Marshal(orderContent) + require.NoError(host.t, err) + + _, err = host.remote.WriteFile(orderPath, orderBytes) + require.NoError(host.t, err) + + return &LocalCDN{ + host: host, + DirPath: localCDNPath, + lock: sync.Mutex{}, + } +} + +// AddLayer adds a layer to the local CDN. It'll be last in order. +func (c *LocalCDN) AddLayer(name string, content string) error { + c.lock.Lock() + defer c.lock.Unlock() + + layerPath := filepath.Join(c.DirPath, name) + + jsonContent := fmt.Sprintf(`{"name": "%s","config": {%s}}`, name, content) + + _, err := c.host.remote.WriteFile(layerPath, []byte(jsonContent)) + require.NoError(c.host.t, err) + + // Add at the end of the order file + orderPath := filepath.Join(c.DirPath, "configuration_order") + orderContent := orderConfig{} + orderBytes, err := c.host.remote.ReadFile(orderPath) + require.NoError(c.host.t, err) + err = json.Unmarshal(orderBytes, &orderContent) + require.NoError(c.host.t, err) + orderContent.Order = append(orderContent.Order, name) + orderBytes, err = json.Marshal(orderContent) + require.NoError(c.host.t, err) + _, err = c.host.remote.WriteFile(orderPath, orderBytes) + require.NoError(c.host.t, err) + + return nil +} + +// UpdateLayer updates a layer in the local CDN. +func (c *LocalCDN) UpdateLayer(name string, content string) error { + c.lock.Lock() + defer c.lock.Unlock() + + layerPath := filepath.Join(c.DirPath, name) + + jsonContent := fmt.Sprintf(`{"name": "%s","config": {%s}}`, name, content) + + _, err := c.host.remote.WriteFile(layerPath, []byte(jsonContent)) + require.NoError(c.host.t, err) + + return nil +} + +// RemoveLayer removes a layer from the local CDN. 
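// For reference, a sketch of the on-disk layout these helpers maintain (inferred from NewLocalCDN and
// AddLayer above; the exact bytes are produced by json.Marshal and fmt.Sprintf there):
//
//	/tmp/local_cdn/<uuid>/configuration_order  -> {"order":["config"]}
//	/tmp/local_cdn/<uuid>/config               -> {"name": "config","config": {"log_level": "debug"}}
//
// RemoveLayer below deletes the layer file and rewrites configuration_order without that entry.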
+func (c *LocalCDN) RemoveLayer(name string) error { + c.lock.Lock() + defer c.lock.Unlock() + + layerPath := filepath.Join(c.DirPath, name) + err := c.host.remote.Remove(layerPath) + require.NoError(c.host.t, err) + + // Remove from order file + orderPath := filepath.Join(c.DirPath, "configuration_order") + orderContent := orderConfig{} + orderBytes, err := c.host.remote.ReadFile(orderPath) + require.NoError(c.host.t, err) + err = json.Unmarshal(orderBytes, &orderContent) + require.NoError(c.host.t, err) + newOrder := []string{} + for _, layer := range orderContent.Order { + if layer != name { + newOrder = append(newOrder, layer) + } + } + orderContent.Order = newOrder + orderBytes, err = json.Marshal(orderContent) + require.NoError(c.host.t, err) + _, err = c.host.remote.WriteFile(orderPath, orderBytes) + require.NoError(c.host.t, err) + return nil +} + +// Reorder reorders the layers in the local CDN. +func (c *LocalCDN) Reorder(orderedLayerNames []string) error { + c.lock.Lock() + defer c.lock.Unlock() + + orderPath := filepath.Join(c.DirPath, "configuration_order") + orderContent := orderConfig{ + Order: orderedLayerNames, + } + orderBytes, err := json.Marshal(orderContent) + require.NoError(c.host.t, err) + _, err = c.host.remote.WriteFile(orderPath, orderBytes) + require.NoError(c.host.t, err) + + return nil +} diff --git a/test/new-e2e/tests/installer/unix/all_packages.go b/test/new-e2e/tests/installer/unix/all_packages.go new file mode 100644 index 0000000000000..ee4c7969ee11f --- /dev/null +++ b/test/new-e2e/tests/installer/unix/all_packages.go @@ -0,0 +1,42 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package installer contains tests for the datadog installer +package installer + +import ( + "os" + "testing" +) + +// InstallMethodOption is the type for the install method to use for the tests +type InstallMethodOption string + +const ( + // InstallMethodInstallScript is the default install method + InstallMethodInstallScript InstallMethodOption = "install_script" + // InstallMethodAnsible is the install method for Ansible + InstallMethodAnsible InstallMethodOption = "ansible" + // InstallMethodWindows is the install method for Windows + InstallMethodWindows InstallMethodOption = "windows" +) + +// GetInstallMethodFromEnv returns the install method to use for the tests +func GetInstallMethodFromEnv(t *testing.T) InstallMethodOption { + supportedValues := []string{string(InstallMethodAnsible), string(InstallMethodInstallScript), string(InstallMethodWindows)} + envValue := os.Getenv("FLEET_INSTALL_METHOD") + switch envValue { + case "install_script": + return InstallMethodInstallScript + case "ansible": + return InstallMethodAnsible + case "windows": + return InstallMethodWindows + default: + t.Logf("FLEET_INSTALL_METHOD is not set or has an unsupported value. 
Supported values are: %v", supportedValues) + t.Log("Using default install method: install_script") + return InstallMethodInstallScript + } +} diff --git a/test/new-e2e/tests/installer/all_packages_test.go b/test/new-e2e/tests/installer/unix/all_packages_test.go similarity index 91% rename from test/new-e2e/tests/installer/all_packages_test.go rename to test/new-e2e/tests/installer/unix/all_packages_test.go index 67d8186196aba..7f2360f89a1d2 100644 --- a/test/new-e2e/tests/installer/all_packages_test.go +++ b/test/new-e2e/tests/installer/unix/all_packages_test.go @@ -24,12 +24,12 @@ import ( "github.com/stretchr/testify/require" ) -type packageTests func(os e2eos.Descriptor, arch e2eos.Architecture, method installMethodOption) packageSuite +type packageTests func(os e2eos.Descriptor, arch e2eos.Architecture, method InstallMethodOption) packageSuite type packageTestsWithSkipedFlavors struct { t packageTests skippedFlavors []e2eos.Descriptor - skippedInstallationMethods []installMethodOption + skippedInstallationMethods []InstallMethodOption } var ( @@ -50,7 +50,7 @@ var ( packagesTestsWithSkippedFlavors = []packageTestsWithSkipedFlavors{ {t: testInstaller}, {t: testAgent}, - {t: testApmInjectAgent, skippedFlavors: []e2eos.Descriptor{e2eos.CentOS7, e2eos.RedHat9, e2eos.Fedora37, e2eos.Suse15}, skippedInstallationMethods: []installMethodOption{installMethodAnsible}}, + {t: testApmInjectAgent, skippedFlavors: []e2eos.Descriptor{e2eos.CentOS7, e2eos.RedHat9, e2eos.Fedora37, e2eos.Suse15}, skippedInstallationMethods: []InstallMethodOption{InstallMethodAnsible}}, {t: testUpgradeScenario}, } ) @@ -64,7 +64,7 @@ func shouldSkipFlavor(flavors []e2eos.Descriptor, flavor e2eos.Descriptor) bool return false } -func shouldSkipInstallMethod(methods []installMethodOption, method installMethodOption) bool { +func shouldSkipInstallMethod(methods []InstallMethodOption, method InstallMethodOption) bool { for _, m := range methods { if m == method { return true @@ -73,28 +73,13 @@ func shouldSkipInstallMethod(methods []installMethodOption, method installMethod return false } -func getInstallMethodFromEnv() installMethodOption { - supportedValues := []string{string(installMethodAnsible), string(installMethodInstallScript)} - envValue := os.Getenv("FLEET_INSTALL_METHOD") - switch envValue { - case "install_script": - return installMethodInstallScript - case "ansible": - return installMethodAnsible - default: - panic(fmt.Sprintf("unsupported install method: %s. 
Supported values are: %v", envValue, supportedValues)) - } -} - func TestPackages(t *testing.T) { - if _, ok := os.LookupEnv("E2E_PIPELINE_ID"); !ok { t.Log("E2E_PIPELINE_ID env var is not set, this test requires this variable to be set to work") t.FailNow() } - method := getInstallMethodFromEnv() - + method := GetInstallMethodFromEnv(t) var flavors []e2eos.Descriptor for _, flavor := range amd64Flavors { flavor.Architecture = e2eos.AMD64Arch @@ -114,7 +99,7 @@ func TestPackages(t *testing.T) { continue } // TODO: remove once ansible+suse is fully supported - if flavor.Flavor == e2eos.Suse && method == installMethodAnsible { + if flavor.Flavor == e2eos.Suse && method == InstallMethodAnsible { continue } @@ -126,10 +111,6 @@ func TestPackages(t *testing.T) { flake.Mark(t) } - // FIXME: Ansible tests are flaky on multiple tests/os - if method == installMethodAnsible { - flake.Mark(t) - } opts := []awshost.ProvisionerOption{ awshost.WithEC2InstanceOptions(ec2.WithOSArch(flavor, flavor.Architecture)), awshost.WithoutAgent(), @@ -159,17 +140,10 @@ type packageBaseSuite struct { pkg string arch e2eos.Architecture os e2eos.Descriptor - installMethod installMethodOption + installMethod InstallMethodOption } -type installMethodOption string - -const ( - installMethodInstallScript installMethodOption = "install_script" - installMethodAnsible installMethodOption = "ansible" -) - -func newPackageSuite(pkg string, os e2eos.Descriptor, arch e2eos.Architecture, method installMethodOption, opts ...awshost.ProvisionerOption) packageBaseSuite { +func newPackageSuite(pkg string, os e2eos.Descriptor, arch e2eos.Architecture, method InstallMethodOption, opts ...awshost.ProvisionerOption) packageBaseSuite { return packageBaseSuite{ os: os, arch: arch, @@ -214,14 +188,14 @@ func (s *packageBaseSuite) RunInstallScriptWithError(params ...string) error { func (s *packageBaseSuite) RunInstallScript(params ...string) { switch s.installMethod { - case installMethodInstallScript: + case InstallMethodInstallScript: // bugfix for https://major.io/p/systemd-in-fedora-22-failed-to-restart-service-access-denied/ if s.os.Flavor == e2eos.CentOS && s.os.Version == e2eos.CentOS7.Version { s.Env().RemoteHost.MustExecute("sudo systemctl daemon-reexec") } err := s.RunInstallScriptWithError(params...) require.NoErrorf(s.T(), err, "installer not properly installed. 
logs: \n%s\n%s", s.Env().RemoteHost.MustExecute("cat /tmp/datadog-installer-stdout.log"), s.Env().RemoteHost.MustExecute("cat /tmp/datadog-installer-stderr.log")) - case installMethodAnsible: + case InstallMethodAnsible: // Install ansible then install the agent ansiblePrefix := s.installAnsible(s.os) diff --git a/test/new-e2e/tests/installer/package_agent_test.go b/test/new-e2e/tests/installer/unix/package_agent_test.go similarity index 88% rename from test/new-e2e/tests/installer/package_agent_test.go rename to test/new-e2e/tests/installer/unix/package_agent_test.go index b1c672952ac92..30e4f52630a0d 100644 --- a/test/new-e2e/tests/installer/package_agent_test.go +++ b/test/new-e2e/tests/installer/unix/package_agent_test.go @@ -36,7 +36,7 @@ type packageAgentSuite struct { packageBaseSuite } -func testAgent(os e2eos.Descriptor, arch e2eos.Architecture, method installMethodOption) packageSuite { +func testAgent(os e2eos.Descriptor, arch e2eos.Architecture, method InstallMethodOption) packageSuite { return &packageAgentSuite{ packageBaseSuite: newPackageSuite("agent", os, arch, method, awshost.WithoutFakeIntake()), } @@ -181,7 +181,7 @@ func (s *packageAgentSuite) TestExperimentTimeout() { Starting(agentUnitXP). Started(processUnitXP). Started(traceUnitXP). - SkippedIf(probeUnitXP, s.installMethod != installMethodAnsible). + SkippedIf(probeUnitXP, s.installMethod != InstallMethodAnsible). Skipped(securityUnitXP), ). @@ -198,7 +198,7 @@ func (s *packageAgentSuite) TestExperimentTimeout() { Unordered(host.SystemdEvents(). Started(traceUnit). Started(processUnit). - SkippedIf(probeUnitXP, s.installMethod != installMethodAnsible). + SkippedIf(probeUnitXP, s.installMethod != InstallMethodAnsible). Skipped(securityUnit), ), ) @@ -248,7 +248,7 @@ func (s *packageAgentSuite) TestExperimentIgnoringSigterm() { Starting(agentUnitXP). Started(processUnitXP). Started(traceUnitXP). - SkippedIf(probeUnitXP, s.installMethod != installMethodAnsible). + SkippedIf(probeUnitXP, s.installMethod != InstallMethodAnsible). Skipped(securityUnitXP), ). @@ -271,7 +271,7 @@ func (s *packageAgentSuite) TestExperimentIgnoringSigterm() { Unordered(host.SystemdEvents(). Started(traceUnit). Started(processUnit). - SkippedIf(probeUnitXP, s.installMethod != installMethodAnsible). + SkippedIf(probeUnitXP, s.installMethod != InstallMethodAnsible). Skipped(securityUnit), ), ) @@ -311,7 +311,7 @@ func (s *packageAgentSuite) TestExperimentExits() { Starting(agentUnitXP). Started(processUnitXP). Started(traceUnitXP). - SkippedIf(probeUnitXP, s.installMethod != installMethodAnsible). + SkippedIf(probeUnitXP, s.installMethod != InstallMethodAnsible). Skipped(securityUnitXP), ). @@ -327,7 +327,7 @@ func (s *packageAgentSuite) TestExperimentExits() { Unordered(host.SystemdEvents(). Started(traceUnit). Started(processUnit). - SkippedIf(probeUnitXP, s.installMethod != installMethodAnsible). + SkippedIf(probeUnitXP, s.installMethod != InstallMethodAnsible). 
Skipped(securityUnit), ), ) @@ -351,7 +351,7 @@ func (s *packageAgentSuite) TestExperimentStopped() { s.host.AssertSystemdEvents(timestamp, host.SystemdEvents().Started(traceUnitXP)) s.host.AssertSystemdEvents(timestamp, host.SystemdEvents().Started(processUnitXP)) s.host.AssertSystemdEvents(timestamp, host.SystemdEvents().Skipped(securityUnitXP)) - s.host.AssertSystemdEvents(timestamp, host.SystemdEvents().SkippedIf(probeUnitXP, s.installMethod != installMethodAnsible)) + s.host.AssertSystemdEvents(timestamp, host.SystemdEvents().SkippedIf(probeUnitXP, s.installMethod != InstallMethodAnsible)) // stop experiment timestamp = s.host.LastJournaldTimestamp() @@ -370,7 +370,7 @@ func (s *packageAgentSuite) TestExperimentStopped() { Unordered(host.SystemdEvents(). Started(traceUnit). Started(processUnit). - SkippedIf(probeUnitXP, s.installMethod != installMethodAnsible). + SkippedIf(probeUnitXP, s.installMethod != InstallMethodAnsible). Skipped(securityUnit), ), ) @@ -397,6 +397,44 @@ func (s *packageAgentSuite) TestRunPath() { assert.True(s.T(), strings.HasPrefix(runPath, "/opt/datadog-packages/datadog-agent/"), "run_path is not in the expected location: %s", runPath) } +func (s *packageAgentSuite) TestUpgrade_DisabledAgentDebRPM_to_OCI() { + // install deb/rpm agent + s.RunInstallScript(envForceNoInstall("datadog-agent")) + s.host.AssertPackageInstalledByPackageManager("datadog-agent") + + defer s.Purge() + defer s.purgeAgentDebInstall() + + state := s.host.State() + s.assertUnits(state, true) + state.AssertDirExists("/opt/datadog-agent", 0755, "dd-agent", "dd-agent") + + // disable the unit + s.host.Run("sudo systemctl disable datadog-agent") + + // install OCI agent + s.RunInstallScript(envForceInstall("datadog-agent")) + + state = s.host.State() + s.assertUnits(state, false) + s.host.AssertPackageInstalledByInstaller("datadog-agent") + s.host.AssertPackageInstalledByPackageManager("datadog-agent") + + s.host.Run("sudo systemctl show datadog-agent -p ExecStart | grep /opt/datadog-packages") +} + +func (s *packageAgentSuite) TestInstallWithLeftoverDebDir() { + // create /opt/datadog-agent to simulate a disabled agent + s.host.Run("sudo mkdir -p /opt/datadog-agent") + + // install OCI agent + s.RunInstallScript(envForceInstall("datadog-agent")) + + state := s.host.State() + s.assertUnits(state, false) + s.host.Run("sudo systemctl show datadog-agent -p ExecStart | grep /opt/datadog-packages") +} + func (s *packageAgentSuite) purgeAgentDebInstall() { pkgManager := s.host.GetPkgManager() switch pkgManager { diff --git a/test/new-e2e/tests/installer/package_apm_inject_test.go b/test/new-e2e/tests/installer/unix/package_apm_inject_test.go similarity index 99% rename from test/new-e2e/tests/installer/package_apm_inject_test.go rename to test/new-e2e/tests/installer/unix/package_apm_inject_test.go index dbdfa6de25483..389da3c392f3e 100644 --- a/test/new-e2e/tests/installer/package_apm_inject_test.go +++ b/test/new-e2e/tests/installer/unix/package_apm_inject_test.go @@ -25,7 +25,7 @@ type packageApmInjectSuite struct { packageBaseSuite } -func testApmInjectAgent(os e2eos.Descriptor, arch e2eos.Architecture, method installMethodOption) packageSuite { +func testApmInjectAgent(os e2eos.Descriptor, arch e2eos.Architecture, method InstallMethodOption) packageSuite { return &packageApmInjectSuite{ packageBaseSuite: newPackageSuite("apm-inject", os, arch, method), } diff --git a/test/new-e2e/tests/installer/package_definitions.go b/test/new-e2e/tests/installer/unix/package_definitions.go similarity index 
100% rename from test/new-e2e/tests/installer/package_definitions.go rename to test/new-e2e/tests/installer/unix/package_definitions.go diff --git a/test/new-e2e/tests/installer/package_installer_test.go b/test/new-e2e/tests/installer/unix/package_installer_test.go similarity index 95% rename from test/new-e2e/tests/installer/package_installer_test.go rename to test/new-e2e/tests/installer/unix/package_installer_test.go index d9b0363b3a4df..2dc0edc917bfd 100644 --- a/test/new-e2e/tests/installer/package_installer_test.go +++ b/test/new-e2e/tests/installer/unix/package_installer_test.go @@ -15,7 +15,7 @@ type packageInstallerSuite struct { packageBaseSuite } -func testInstaller(os e2eos.Descriptor, arch e2eos.Architecture, method installMethodOption) packageSuite { +func testInstaller(os e2eos.Descriptor, arch e2eos.Architecture, method InstallMethodOption) packageSuite { return &packageInstallerSuite{ packageBaseSuite: newPackageSuite("installer", os, arch, method, awshost.WithoutFakeIntake()), } @@ -102,20 +102,20 @@ func (s *packageInstallerSuite) TestReInstall() { func (s *packageInstallerSuite) TestUpdateInstallerOCI() { // Install prod err := s.RunInstallScriptProdOci( - envForceVersion("datadog-installer", "7.55.0-installer-0.2.1-1"), + envForceVersion("datadog-installer", "7.58.0-installer-0.5.1-1"), ) defer s.Purge() assert.NoError(s.T(), err) version := s.Env().RemoteHost.MustExecute("/opt/datadog-packages/datadog-installer/stable/bin/installer/installer version") - assert.Equal(s.T(), "7.55.0-installer-0.2.1\n", version) + assert.Equal(s.T(), "7.58.0-installer-0.5.1\n", version) // Install from QA registry err = s.RunInstallScriptWithError() assert.NoError(s.T(), err) version = s.Env().RemoteHost.MustExecute("/opt/datadog-packages/datadog-installer/stable/bin/installer/installer version") - assert.NotEqual(s.T(), "7.55.0-installer-0.2.1\n", version) + assert.NotEqual(s.T(), "7.58.0-installer-0.5.1\n", version) } func (s *packageInstallerSuite) TestInstallWithUmask() { diff --git a/test/new-e2e/tests/installer/upgrade_scenario_test.go b/test/new-e2e/tests/installer/unix/upgrade_scenario_test.go similarity index 64% rename from test/new-e2e/tests/installer/upgrade_scenario_test.go rename to test/new-e2e/tests/installer/unix/upgrade_scenario_test.go index e4ada47c79316..62da6d6ffda4c 100644 --- a/test/new-e2e/tests/installer/upgrade_scenario_test.go +++ b/test/new-e2e/tests/installer/unix/upgrade_scenario_test.go @@ -10,6 +10,7 @@ import ( "fmt" "time" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" "github.com/DataDog/datadog-agent/test/new-e2e/tests/installer/host" e2eos "github.com/DataDog/test-infra-definitions/components/os" "github.com/stretchr/testify/assert" @@ -87,7 +88,7 @@ const ( previousInstallerImageVersion = "7.55.0-installer-0.4.1-1" ) -func testUpgradeScenario(os e2eos.Descriptor, arch e2eos.Architecture, method installMethodOption) packageSuite { +func testUpgradeScenario(os e2eos.Descriptor, arch e2eos.Architecture, method InstallMethodOption) packageSuite { return &upgradeScenarioSuite{ packageBaseSuite: newPackageSuite("upgrade_scenario", os, arch, method), } @@ -103,6 +104,7 @@ func (s *upgradeScenarioSuite) TestUpgradeSuccessful() { "datadog-agent-process.service", "datadog-installer.service", ) + s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock") s.setCatalog(testCatalog) s.executeAgentGoldenPath() @@ -118,7 +120,6 @@ func (s *upgradeScenarioSuite) TestUpgradeFromExistingExperiment() { 
"datadog-agent-process.service", "datadog-installer.service", ) - s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock") s.setCatalog(testCatalog) @@ -147,6 +148,7 @@ func (s *upgradeScenarioSuite) TestBackendFailure() { "datadog-agent-process.service", "datadog-installer.service", ) + s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock") s.setCatalog(testCatalog) @@ -170,6 +172,7 @@ func (s *upgradeScenarioSuite) TestExperimentFailure() { "datadog-agent-process.service", "datadog-installer.service", ) + s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock") s.setCatalog(testCatalog) @@ -195,6 +198,7 @@ func (s *upgradeScenarioSuite) TestExperimentCurrentVersion() { "datadog-agent-process.service", "datadog-installer.service", ) + s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock") // Temporary catalog to wait for the installer to be ready s.setCatalog(testCatalog) @@ -225,6 +229,7 @@ func (s *upgradeScenarioSuite) TestStopWithoutExperiment() { "datadog-agent-process.service", "datadog-installer.service", ) + s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock") s.setCatalog(testCatalog) beforeStatus := s.getInstallerStatus() @@ -244,6 +249,7 @@ func (s *upgradeScenarioSuite) TestDoubleExperiments() { "datadog-agent-process.service", "datadog-installer.service", ) + s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock") s.setCatalog(testCatalog) @@ -272,6 +278,7 @@ func (s *upgradeScenarioSuite) TestPromoteWithoutExperiment() { "datadog-agent-process.service", "datadog-installer.service", ) + s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock") s.setCatalog(testCatalog) @@ -297,6 +304,7 @@ func (s *upgradeScenarioSuite) TestInstallerSuccessful() { "datadog-agent-process.service", "datadog-installer.service", ) + s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock") s.setCatalog(testCatalog) s.executeInstallerGoldenPath() @@ -312,6 +320,7 @@ func (s *upgradeScenarioSuite) TestInstallerBackendFailure() { "datadog-agent-process.service", "datadog-installer.service", ) + s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock") s.setCatalog(testCatalog) @@ -339,6 +348,7 @@ func (s *upgradeScenarioSuite) TestInstallerAgentFailure() { "datadog-agent-process.service", "datadog-installer.service", ) + s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock") s.setCatalog(testCatalog) @@ -374,6 +384,116 @@ func (s *upgradeScenarioSuite) TestUpgradeSuccessfulWithUmask() { s.TestUpgradeSuccessful() } +func (s *upgradeScenarioSuite) TestConfigUpgradeSuccessful() { + localCDN := host.NewLocalCDN(s.host) + localCDN.AddLayer("config", "\"log_level\": \"debug\"") + s.RunInstallScript( + "DD_REMOTE_UPDATES=true", + "DD_REMOTE_POLICIES=true", + fmt.Sprintf("DD_INSTALLER_DEBUG_CDN_LOCAL_DIR_PATH=%s", localCDN.DirPath), + ) + defer s.Purge() + s.host.AssertPackageInstalledByInstaller("datadog-agent") + s.host.WaitForUnitActive( + "datadog-agent.service", + "datadog-agent-trace.service", + "datadog-agent-process.service", + "datadog-installer.service", + ) + s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock") + + state := s.host.State() + state.AssertSymlinkExists("/etc/datadog-packages/datadog-agent/stable", "/etc/datadog-packages/datadog-agent/e94406c45ae766b7d34d2793e4759b9c4d15ed5d5e2b7f73ce1bf0e6836f728d", "root", "root") + + localCDN.UpdateLayer("config", "\"log_level\": \"error\"") 
+ s.executeConfigGoldenPath(localCDN.DirPath, "c78c5e96820c89c6cbc178ddba4ce20a167138a3a580ed4637369a9c5ed804c3") +} + +func (s *upgradeScenarioSuite) TestUpgradeConfigFromExistingExperiment() { + localCDN := host.NewLocalCDN(s.host) + localCDN.AddLayer("config", "\"log_level\": \"debug\"") + s.RunInstallScript( + "DD_REMOTE_UPDATES=true", + "DD_REMOTE_POLICIES=true", + fmt.Sprintf("DD_INSTALLER_DEBUG_CDN_LOCAL_DIR_PATH=%s", localCDN.DirPath), + ) + defer s.Purge() + s.host.AssertPackageInstalledByInstaller("datadog-agent") + s.host.WaitForUnitActive( + "datadog-agent.service", + "datadog-agent-trace.service", + "datadog-agent-process.service", + "datadog-installer.service", + ) + s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock") + + localCDN.UpdateLayer("config", "\"log_level\": \"error\"") + + timestamp := s.host.LastJournaldTimestamp() + s.mustStartConfigExperiment(localCDN.DirPath, datadogAgent, "c78c5e96820c89c6cbc178ddba4ce20a167138a3a580ed4637369a9c5ed804c3") + s.assertSuccessfulConfigStartExperiment(timestamp, "c78c5e96820c89c6cbc178ddba4ce20a167138a3a580ed4637369a9c5ed804c3") + + // Host was left with a config experiment, we're now testing + // that we can still upgrade + timestamp = s.host.LastJournaldTimestamp() + s.mustStopConfigExperiment(localCDN.DirPath, datadogAgent) + s.assertSuccessfulConfigStopExperiment(timestamp) + + localCDN.UpdateLayer("config", "\"log_level\": \"info\"") + s.executeConfigGoldenPath(localCDN.DirPath, "a16f6c17fdb819a22bdbf80c34aeaa0a9e691865dc6a66d29e4d78586c967b1e") +} + +func (s *upgradeScenarioSuite) TestUpgradeConfigFailure() { + localCDN := host.NewLocalCDN(s.host) + localCDN.AddLayer("config", "\"log_level\": \"debug\"") + s.RunInstallScript( + "DD_REMOTE_UPDATES=true", + "DD_REMOTE_POLICIES=true", + fmt.Sprintf("DD_INSTALLER_DEBUG_CDN_LOCAL_DIR_PATH=%s", localCDN.DirPath), + ) + defer s.Purge() + s.host.AssertPackageInstalledByInstaller("datadog-agent") + s.host.WaitForUnitActive( + "datadog-agent.service", + "datadog-agent-trace.service", + "datadog-agent-process.service", + "datadog-installer.service", + ) + s.host.WaitForFileExists(true, "/opt/datadog-packages/run/installer.sock") + + localCDN.UpdateLayer("config", "\"secret_backend_command\":\"echo\",\"log_level\":\"ENC[hi]\"") // Non alphanumerical characters are not allowed, the agent should crash + timestamp := s.host.LastJournaldTimestamp() + _, err := s.startConfigExperiment(localCDN.DirPath, datadogAgent, "f8e8662c2e0a7a2feb019626d5f6998d845ff6d319c557f406cbba9a4d90feee") + s.T().Logf("Error: %s", s.Env().RemoteHost.MustExecute("cat /tmp/start_config_experiment.log")) + require.NoError(s.T(), err) + + // Assert experiment is stopped as the agent should've crashed + s.host.AssertSystemdEvents(timestamp, host.SystemdEvents(). + Unordered(host.SystemdEvents(). // Stable stops + Stopped(agentUnit). + Stopped(processUnit). + Stopped(traceUnit), + ). + Unordered(host.SystemdEvents(). // Experiment starts + Starting(agentUnitXP). + Started(processUnitXP). + Started(traceUnitXP), + ). + Unordered(host.SystemdEvents(). // Experiment fails + Failed(agentUnitXP). + Stopped(processUnitXP). + Stopped(traceUnitXP), + ). + Started(agentUnit). // Stable restarts + Unordered(host.SystemdEvents(). + Started(traceUnit). 
+ Started(processUnit), + ), + ) + + s.mustStopExperiment(datadogAgent) +} + func (s *upgradeScenarioSuite) startExperiment(pkg packageName, version string) (string, error) { cmd := fmt.Sprintf("sudo datadog-installer daemon start-experiment %s %s > /tmp/start_experiment.log 2>&1", pkg, version) s.T().Logf("Running start command: %s", cmd) @@ -382,7 +502,7 @@ func (s *upgradeScenarioSuite) startExperiment(pkg packageName, version string) func (s *upgradeScenarioSuite) mustStartExperiment(pkg packageName, version string) string { output, err := s.startExperiment(pkg, version) - require.NoError(s.T(), err, "Failed to start experiment: %s\ndatadog-installer journalctl:\n%s\ndatadog-installer-exp journalctl:\n%s", + require.NoError(s.T(), err, "Failed to start experiment: v%s\ndatadog-installer journalctl:\n%s\ndatadog-installer-exp journalctl:\n%s", s.Env().RemoteHost.MustExecute("cat /tmp/start_experiment.log"), s.Env().RemoteHost.MustExecute("sudo journalctl -xeu datadog-installer --no-pager"), s.Env().RemoteHost.MustExecute("sudo journalctl -xeu datadog-installer-exp --no-pager"), @@ -501,6 +621,117 @@ func (s *upgradeScenarioSuite) assertSuccessfulAgentStopExperiment(timestamp hos require.Equal(s.T(), "", installerStatus.Packages["datadog-agent"].ExperimentVersion) } +func (s *upgradeScenarioSuite) startConfigExperiment(localCDNPath string, pkg packageName, hash string) (string, error) { + cmd := fmt.Sprintf("sudo -E datadog-installer install-config-experiment %s %s > /tmp/start_config_experiment.log 2>&1", pkg, hash) + s.T().Logf("Running start command: %s", cmd) + return s.Env().RemoteHost.Execute(cmd, client.WithEnvVariables(map[string]string{"DD_INSTALLER_DEBUG_CDN_LOCAL_DIR_PATH": localCDNPath})) +} + +func (s *upgradeScenarioSuite) mustStartConfigExperiment(localCDNPath string, pkg packageName, version string) string { + output, err := s.startConfigExperiment(localCDNPath, pkg, version) + require.NoError(s.T(), err, "Failed to start config experiment: %s\ndatadog-installer journalctl:\n%s\ndatadog-installer-exp journalctl:\n%s", + s.Env().RemoteHost.MustExecute("cat /tmp/start_config_experiment.log"), + s.Env().RemoteHost.MustExecute("sudo journalctl -xeu datadog-installer --no-pager"), + s.Env().RemoteHost.MustExecute("sudo journalctl -xeu datadog-installer-exp --no-pager"), + ) + return output +} + +func (s *upgradeScenarioSuite) promoteConfigExperiment(localCDNPath string, pkg packageName) (string, error) { + cmd := fmt.Sprintf("sudo -E datadog-installer promote-config-experiment %s > /tmp/promote_config_experiment.log 2>&1", pkg) + s.T().Logf("Running promote command: %s", cmd) + return s.Env().RemoteHost.Execute(cmd, client.WithEnvVariables(map[string]string{"DD_INSTALLER_DEBUG_CDN_LOCAL_DIR_PATH": localCDNPath})) +} + +func (s *upgradeScenarioSuite) mustPromoteConfigExperiment(localCDNPath string, pkg packageName) string { + output, err := s.promoteConfigExperiment(localCDNPath, pkg) + require.NoError(s.T(), err, "Failed to promote config experiment: %s\ndatadog-installer journalctl:\n%s\ndatadog-installer-exp journalctl:\n%s", + s.Env().RemoteHost.MustExecute("cat /tmp/promote_config_experiment.log"), + s.Env().RemoteHost.MustExecute("sudo journalctl -xeu datadog-installer --no-pager"), + s.Env().RemoteHost.MustExecute("sudo journalctl -xeu datadog-installer-exp --no-pager"), + ) + return output +} + +func (s *upgradeScenarioSuite) stopConfigExperiment(localCDNPath string, pkg packageName) (string, error) { + cmd := fmt.Sprintf("sudo -E datadog-installer 
remove-config-experiment %s > /tmp/stop_config_experiment.log 2>&1", pkg) + s.T().Logf("Running stop command: %s", cmd) + return s.Env().RemoteHost.Execute(cmd, client.WithEnvVariables(map[string]string{"DD_INSTALLER_DEBUG_CDN_LOCAL_DIR_PATH": localCDNPath})) +} + +func (s *upgradeScenarioSuite) mustStopConfigExperiment(localCDNPath string, pkg packageName) string { + output, err := s.stopConfigExperiment(localCDNPath, pkg) + require.NoError(s.T(), err, "Failed to stop experiment: %s\ndatadog-installer journalctl:\n%s\ndatadog-installer-exp journalctl:\n%s", + s.Env().RemoteHost.MustExecute("cat /tmp/stop_config_experiment.log"), + s.Env().RemoteHost.MustExecute("sudo journalctl -xeu datadog-installer --no-pager"), + s.Env().RemoteHost.MustExecute("sudo journalctl -xeu datadog-installer-exp --no-pager"), + ) + return output +} + +func (s *upgradeScenarioSuite) assertSuccessfulConfigStartExperiment(timestamp host.JournaldTimestamp, hash string) { + s.host.WaitForUnitActivating(agentUnitXP) + s.host.WaitForFileExists(false, "/opt/datadog-packages/datadog-agent/experiment/run/agent.pid") + + // Assert experiment is running + s.host.AssertSystemdEvents(timestamp, host.SystemdEvents(). + Unordered(host.SystemdEvents(). + Stopped(agentUnit). + Stopped(traceUnit). + Stopped(processUnit), + ). + Unordered(host.SystemdEvents(). + Starting(agentUnitXP). + Started(traceUnitXP). + Started(processUnitXP), + ), + ) + + state := s.host.State() + state.AssertSymlinkExists("/etc/datadog-packages/datadog-agent/experiment", fmt.Sprintf("/etc/datadog-packages/datadog-agent/%s", hash), "root", "root") +} + +func (s *upgradeScenarioSuite) assertSuccessfulConfigPromoteExperiment(timestamp host.JournaldTimestamp, hash string) { + s.host.WaitForUnitActive(agentUnit) + + // Assert experiment is promoted + s.host.AssertSystemdEvents(timestamp, host.SystemdEvents(). + Unordered(host.SystemdEvents(). + Stopped(agentUnitXP). + Stopped(processUnitXP). + Stopped(traceUnitXP), + ). + Unordered(host.SystemdEvents(). + Started(agentUnit). + Stopped(processUnit). + Stopped(traceUnit), + ), + ) + + state := s.host.State() + state.AssertSymlinkExists("/etc/datadog-packages/datadog-agent/stable", fmt.Sprintf("/etc/datadog-packages/datadog-agent/%s", hash), "root", "root") + state.AssertSymlinkExists("/etc/datadog-packages/datadog-agent/experiment", fmt.Sprintf("/etc/datadog-packages/datadog-agent/%s", hash), "root", "root") +} + +func (s *upgradeScenarioSuite) assertSuccessfulConfigStopExperiment(timestamp host.JournaldTimestamp) { + // Assert experiment is stopped + s.host.AssertSystemdEvents(timestamp, host.SystemdEvents(). + Unordered(host.SystemdEvents(). + Stopped(agentUnitXP). + Stopped(processUnitXP). + Stopped(traceUnitXP), + ). + Started(agentUnit). + Unordered(host.SystemdEvents(). + Started(traceUnit). 
+ Started(processUnit), + ), + ) + + state := s.host.State() + state.AssertSymlinkExists("/etc/datadog-packages/datadog-agent/experiment", "/etc/datadog-packages/datadog-agent/stable", "root", "root") +} + func (s *upgradeScenarioSuite) getInstallerStatus() installerStatus { socketPath := "/opt/datadog-packages/run/installer.sock" @@ -604,3 +835,13 @@ func (s *upgradeScenarioSuite) executeInstallerGoldenPath() { s.promoteExperiment(datadogInstaller) s.assertSuccessfulInstallerPromoteExperiment(timestamp, previousInstallerImageVersion) } + +func (s *upgradeScenarioSuite) executeConfigGoldenPath(localCDNPath, hash string) { + timestamp := s.host.LastJournaldTimestamp() + s.mustStartConfigExperiment(localCDNPath, datadogAgent, hash) + s.assertSuccessfulConfigStartExperiment(timestamp, hash) + + timestamp = s.host.LastJournaldTimestamp() + s.mustPromoteConfigExperiment(localCDNPath, datadogAgent) + s.assertSuccessfulConfigPromoteExperiment(timestamp, hash) +} diff --git a/test/new-e2e/tests/installer/windows/base_suite.go b/test/new-e2e/tests/installer/windows/base_suite.go index 3d9a9307c29c6..e95fbe8d3adc7 100644 --- a/test/new-e2e/tests/installer/windows/base_suite.go +++ b/test/new-e2e/tests/installer/windows/base_suite.go @@ -6,14 +6,14 @@ package installer import ( - "fmt" + "os" + "strings" + agentVersion "github.com/DataDog/datadog-agent/pkg/version" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" - "github.com/DataDog/datadog-agent/test/new-e2e/tests/installer/windows/suite-assertions" - "os" - "strings" + suiteasserts "github.com/DataDog/datadog-agent/test/new-e2e/tests/installer/windows/suite-assertions" ) // PackageVersion is a helper type to store both the version and the package version of a binary. @@ -60,6 +60,7 @@ type BaseInstallerSuite struct { currentAgentVersion agentVersion.Version stableInstallerVersion PackageVersion stableAgentVersion PackageVersion + outputDir string } // Installer the Datadog Installer for testing. @@ -110,10 +111,11 @@ func (s *BaseInstallerSuite) SetupSuite() { func (s *BaseInstallerSuite) BeforeTest(suiteName, testName string) { s.BaseSuite.BeforeTest(suiteName, testName) - outputDir, err := runner.GetTestOutputDir(runner.GetProfile(), s.T()) + var err error + s.outputDir, err = runner.GetTestOutputDir(runner.GetProfile(), s.T()) s.Require().NoError(err, "should get output dir") - s.T().Logf("Output dir: %s", outputDir) - s.installer = NewDatadogInstaller(s.Env(), fmt.Sprintf("%s/install.log", outputDir)) + s.T().Logf("Output dir: %s", s.outputDir) + s.installer = NewDatadogInstaller(s.Env(), s.outputDir) } // Require instantiates a suiteAssertions for the current suite. 
@@ -127,3 +129,8 @@ func (s *BaseInstallerSuite) BeforeTest(suiteName, testName string) { func (s *BaseInstallerSuite) Require() *suiteasserts.SuiteAssertions { return suiteasserts.New(s.BaseSuite.Require(), s) } + +// OutputDir returns the output directory for the test +func (s *BaseInstallerSuite) OutputDir() string { + return s.outputDir +} diff --git a/test/new-e2e/tests/installer/windows/datadog_installer.go b/test/new-e2e/tests/installer/windows/datadog_installer.go index e246a29a5bbd1..cc08a13e3c4b1 100644 --- a/test/new-e2e/tests/installer/windows/datadog_installer.go +++ b/test/new-e2e/tests/installer/windows/datadog_installer.go @@ -8,18 +8,19 @@ package installer import ( "fmt" + "os" + "path" + "path/filepath" + "strings" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional" - "github.com/DataDog/datadog-agent/test/new-e2e/tests/installer" + installer "github.com/DataDog/datadog-agent/test/new-e2e/tests/installer/unix" windowsCommon "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common" "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/agent/installers/v2" "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/pipeline" e2eos "github.com/DataDog/test-infra-definitions/components/os" - "os" - "path" - "path/filepath" - "strings" ) const ( @@ -49,16 +50,20 @@ var ( type DatadogInstaller struct { binaryPath string env *environments.WindowsHost - logPath string + outputDir string } // NewDatadogInstaller instantiates a new instance of the Datadog Installer running // on a remote Windows host. -func NewDatadogInstaller(env *environments.WindowsHost, logPath string) *DatadogInstaller { +func NewDatadogInstaller(env *environments.WindowsHost, outputDir string) *DatadogInstaller { + if outputDir == "" { + outputDir = os.TempDir() + } + return &DatadogInstaller{ binaryPath: path.Join(Path, BinaryName), env: env, - logPath: logPath, + outputDir: outputDir, } } @@ -130,8 +135,9 @@ func (d *DatadogInstaller) RemoveExperiment(packageName string) (string, error) // Params contains the optional parameters for the Datadog Installer Install command type Params struct { - installerURL string - msiArgs []string + installerURL string + msiArgs []string + msiLogFilename string } // Option is an optional function parameter type for the Datadog Installer Install command @@ -153,18 +159,25 @@ func WithMSIArg(arg string) Option { } } +// WithMSILogFile sets the filename for the MSI log file, to be stored in the output directory. +func WithMSILogFile(filename string) Option { + return func(params *Params) error { + params.msiLogFilename = filename + return nil + } +} + // WithInstallerURLFromInstallersJSON uses a specific URL for the Datadog Installer from an installers_v2.json // file. -// bucket: The S3 bucket to look for the installers_v2.json file, i.e. "dd-agent-mstesting" -// channel: The channel in the bucket, i.e. "stable" +// jsonURL: The URL of the installers_v2.json file, i.e. pipeline.StableURL // version: The artifact version to retrieve, i.e. 
"7.56.0-installer-0.4.5-1" // -// Example: WithInstallerURLFromInstallersJSON("dd-agent-mstesting", "stable", "7.56.0-installer-0.4.5-1") -// will look into "https://s3.amazonaws.com/dd-agent-mstesting/builds/stable/installers_v2.json" for the Datadog Installer +// Example: WithInstallerURLFromInstallersJSON(pipeline.StableURL, "7.56.0-installer-0.4.5-1") +// will look into "https://s3.amazonaws.com/ddagent-windows-stable/stable/installers_v2.json" for the Datadog Installer // version "7.56.0-installer-0.4.5-1" -func WithInstallerURLFromInstallersJSON(bucket, channel, version string) Option { +func WithInstallerURLFromInstallersJSON(jsonURL, version string) Option { return func(params *Params) error { - url, err := installers.GetProductURL(fmt.Sprintf("https://s3.amazonaws.com/%s/builds/%s/installers_v2.json", bucket, channel), "datadog-installer", version, "x86_64") + url, err := installers.GetProductURL(jsonURL, "datadog-installer", version, "x86_64") if err != nil { return err } @@ -176,10 +189,12 @@ func WithInstallerURLFromInstallersJSON(bucket, channel, version string) Option // Install will attempt to install the Datadog Installer on the remote host. // By default, it will use the installer from the current pipeline. func (d *DatadogInstaller) Install(opts ...Option) error { - params := Params{} + params := Params{ + msiLogFilename: "install.log", + } err := optional.ApplyOptions(¶ms, opts) if err != nil { - return nil + return err } // MSI can install from a URL or a local file msiPath := params.installerURL @@ -201,9 +216,9 @@ func (d *DatadogInstaller) Install(opts ...Option) error { params.installerURL = artifactURL msiPath = params.installerURL } - logPath := d.logPath - if logPath == "" { - logPath = filepath.Join(os.TempDir(), "install.log") + logPath := filepath.Join(d.outputDir, params.msiLogFilename) + if _, err := os.Stat(logPath); err == nil { + return fmt.Errorf("log file %s already exists", logPath) } msiArgs := "" if params.msiArgs != nil { @@ -214,10 +229,12 @@ func (d *DatadogInstaller) Install(opts ...Option) error { // Uninstall will attempt to uninstall the Datadog Installer on the remote host. func (d *DatadogInstaller) Uninstall(opts ...Option) error { - params := Params{} + params := Params{ + msiLogFilename: "uninstall.log", + } err := optional.ApplyOptions(¶ms, opts) if err != nil { - return nil + return err } productCode, err := windowsCommon.GetProductCodeByName(d.env.RemoteHost, "Datadog Installer") @@ -225,9 +242,9 @@ func (d *DatadogInstaller) Uninstall(opts ...Option) error { return err } - logPath := d.logPath - if logPath == "" { - logPath = filepath.Join(os.TempDir(), "uninstall.log") + logPath := filepath.Join(d.outputDir, params.msiLogFilename) + if _, err := os.Stat(logPath); err == nil { + return fmt.Errorf("log file %s already exists", logPath) } msiArgs := "" if params.msiArgs != nil { diff --git a/test/new-e2e/tests/installer/windows/suites/agent-package/agent-user_test.go b/test/new-e2e/tests/installer/windows/suites/agent-package/agent-user_test.go new file mode 100644 index 0000000000000..9c7d4d716e468 --- /dev/null +++ b/test/new-e2e/tests/installer/windows/suites/agent-package/agent-user_test.go @@ -0,0 +1,65 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package agenttests + +import ( + installer "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components/datadog-installer" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host/windows" + installerwindows "github.com/DataDog/datadog-agent/test/new-e2e/tests/installer/windows" + windowsCommon "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common" + windowsAgent "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/agent" + + "github.com/stretchr/testify/require" + "testing" +) + +type testAgentInstallWithAgentUserSuite struct { + installerwindows.BaseInstallerSuite + agentUser string +} + +// TestAgentInstalls tests the usage of the Datadog installer to install the Datadog Agent package. +func TestAgentInstallsWithAgentUser(t *testing.T) { + agentUser := "customuser" + require.NotEqual(t, windowsAgent.DefaultAgentUserName, agentUser, "the custom user should be different from the default user") + + e2e.Run(t, &testAgentInstallWithAgentUserSuite{ + agentUser: agentUser, + }, + e2e.WithProvisioner( + winawshost.ProvisionerNoAgentNoFakeIntake( + winawshost.WithInstaller( + installer.WithAgentUser("customuser"), + ), + ))) +} + +// TestInstallAgentPackage tests installing and uninstalling the Datadog Agent using the Datadog installer. +func (s *testAgentInstallWithAgentUserSuite) TestInstallAgentPackage() { + s.Run("Install", func() { + s.installAgent() + }) +} + +func (s *testAgentInstallWithAgentUserSuite) installAgent() { + // Arrange + + // Act + _, err := s.Installer().InstallPackage(installerwindows.AgentPackage) + + // Assert + s.Require().NoErrorf(err, "failed to install the Datadog Agent package") + s.Require().Host(s.Env().RemoteHost). + HasARunningDatadogAgentService(). + HasRegistryKey(installerwindows.RegistryKeyPath). + WithValueEqual("installedUser", s.agentUser) + identity, err := windowsCommon.GetIdentityForUser(s.Env().RemoteHost, s.agentUser) + s.Require().NoError(err) + s.Require().Host(s.Env().RemoteHost). + HasAService("datadogagent"). 
+ WithIdentity(identity) +} diff --git a/test/new-e2e/tests/installer/windows/suites/agent-package/install_test.go b/test/new-e2e/tests/installer/windows/suites/agent-package/install_test.go index b318186b53f72..38fd858783940 100644 --- a/test/new-e2e/tests/installer/windows/suites/agent-package/install_test.go +++ b/test/new-e2e/tests/installer/windows/suites/agent-package/install_test.go @@ -7,9 +7,13 @@ package agenttests import ( + "path/filepath" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host/windows" installerwindows "github.com/DataDog/datadog-agent/test/new-e2e/tests/installer/windows" + windowsAgent "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/agent" + "testing" ) @@ -30,7 +34,7 @@ func TestAgentInstalls(t *testing.T) { func (s *testAgentInstallSuite) TestInstallAgentPackage() { s.Run("Install", func() { s.installAgent() - s.Run("Uninstall", s.uninstallAgent) + s.Run("Uninstall", s.removeAgentPackage) }) } @@ -45,7 +49,7 @@ func (s *testAgentInstallSuite) installAgent() { s.Require().Host(s.Env().RemoteHost).HasARunningDatadogAgentService() } -func (s *testAgentInstallSuite) uninstallAgent() { +func (s *testAgentInstallSuite) removeAgentPackage() { // Arrange // Act @@ -54,4 +58,33 @@ func (s *testAgentInstallSuite) uninstallAgent() { // Assert s.Require().NoErrorf(err, "failed to remove the Datadog Agent package: %s", output) s.Require().Host(s.Env().RemoteHost).HasNoDatadogAgentService() + s.Require().Host(s.Env().RemoteHost). + NoDirExists(installerwindows.GetStableDirFor(installerwindows.AgentPackage), + "the package directory should be removed") +} + +func (s *testAgentInstallSuite) TestRemoveAgentAfterMSIUninstall() { + // Arrange + s.installAgent() + s.uninstallAgentWithMSI() + + // Act + + // Assert + s.removeAgentPackage() +} + +func (s *testAgentInstallSuite) uninstallAgentWithMSI() { + // Arrange + + // Act + err := windowsAgent.UninstallAgent(s.Env().RemoteHost, + filepath.Join(s.OutputDir(), "uninstall.log"), + ) + + // Assert + s.Require().NoErrorf(err, "failed to uninstall the Datadog Agent package") + s.Require().Host(s.Env().RemoteHost). + DirExists(installerwindows.GetStableDirFor(installerwindows.AgentPackage), + "the package directory should still exist after manually uninstalling the Agent with the MSI") } diff --git a/test/new-e2e/tests/installer/windows/suites/agent-package/upgrade_test.go b/test/new-e2e/tests/installer/windows/suites/agent-package/upgrade_test.go index fa75f2665a105..baf96bcea406e 100644 --- a/test/new-e2e/tests/installer/windows/suites/agent-package/upgrade_test.go +++ b/test/new-e2e/tests/installer/windows/suites/agent-package/upgrade_test.go @@ -10,7 +10,7 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" winawshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host/windows" - "github.com/DataDog/datadog-agent/test/new-e2e/tests/installer" + installer "github.com/DataDog/datadog-agent/test/new-e2e/tests/installer/unix" installerwindows "github.com/DataDog/datadog-agent/test/new-e2e/tests/installer/windows" ) @@ -38,6 +38,84 @@ func (s *testAgentUpgradeSuite) TestUpgradeAgentPackage() { }) } +// TestDowngradeAgentPackage tests that it's possible to downgrade the Datadog Agent using the Datadog installer. 
+func (s *testAgentUpgradeSuite) TestDowngradeAgentPackage() { + // Arrange + _, err := s.Installer().InstallPackage(installerwindows.AgentPackage) + s.Require().NoErrorf(err, "failed to install the stable Datadog Agent package") + + // Act + _, err = s.Installer().InstallExperiment(installerwindows.AgentPackage, + installer.WithRegistry("public.ecr.aws/datadog"), + installer.WithVersion(s.StableAgentVersion().PackageVersion()), + installer.WithAuthentication(""), + ) + + // Assert + s.Require().NoErrorf(err, "failed to downgrade to stable Datadog Agent package") + s.Require().Host(s.Env().RemoteHost). + HasARunningDatadogAgentService(). + WithVersionMatchPredicate(func(version string) { + s.Require().Contains(version, s.StableAgentVersion().Version()) + }). + DirExists(installerwindows.GetStableDirFor(installerwindows.AgentPackage)) +} + +func (s *testAgentUpgradeSuite) TestExperimentFailure() { + // Arrange + s.Run("Install stable", func() { + s.installStableAgent() + }) + + // Act + _, err := s.Installer().InstallExperiment(installerwindows.AgentPackage, + installer.WithRegistry("public.ecr.aws/datadog"), + installer.WithVersion("unknown-version"), + installer.WithAuthentication(""), + ) + + // Assert + s.Require().Error(err, "expected an error when trying to start an experiment with an unknown version") + s.stopExperiment() + // TODO: is this the same test as TestStopWithoutExperiment? +} + +func (s *testAgentUpgradeSuite) TestExperimentCurrentVersion() { + // Arrange + s.Run("Install stable", func() { + s.installStableAgent() + }) + + // Act + _, err := s.Installer().InstallExperiment(installerwindows.AgentPackage, + installer.WithRegistry("public.ecr.aws/datadog"), + installer.WithVersion(s.StableAgentVersion().PackageVersion()), + installer.WithAuthentication(""), + ) + + // Assert + s.Require().Error(err, "expected an error when trying to start an experiment with the same version as the current one") + s.Require().Host(s.Env().RemoteHost). + HasARunningDatadogAgentService(). + WithVersionMatchPredicate(func(version string) { + s.Require().Contains(version, s.StableAgentVersion().Version()) + }). + DirExists(installerwindows.GetStableDirFor(installerwindows.AgentPackage)) +} + +func (s *testAgentUpgradeSuite) TestStopWithoutExperiment() { + // Arrange + s.Run("Install stable", func() { + s.installStableAgent() + }) + + // Act + + // Assert + s.stopExperiment() + // TODO: Currently uninstalls stable then reinstalls stable. functional but a waste. 
+} + func (s *testAgentUpgradeSuite) installStableAgent() { // Arrange diff --git a/test/new-e2e/tests/installer/windows/suites/installer-package/base_suite.go b/test/new-e2e/tests/installer/windows/suites/installer-package/base_suite.go index 240fcc1035f51..de9df45e634d8 100644 --- a/test/new-e2e/tests/installer/windows/suites/installer-package/base_suite.go +++ b/test/new-e2e/tests/installer/windows/suites/installer-package/base_suite.go @@ -24,7 +24,9 @@ func (s *baseInstallerSuite) freshInstall() { // Arrange // Act - s.Require().NoError(s.Installer().Install()) + s.Require().NoError(s.Installer().Install( + installerwindows.WithMSILogFile("fresh-install.log"), + )) // Assert s.requireInstalled() diff --git a/test/new-e2e/tests/installer/windows/suites/installer-package/install_test.go b/test/new-e2e/tests/installer/windows/suites/installer-package/install_test.go index 25f3df70dca0b..c4109055ae218 100644 --- a/test/new-e2e/tests/installer/windows/suites/installer-package/install_test.go +++ b/test/new-e2e/tests/installer/windows/suites/installer-package/install_test.go @@ -67,7 +67,9 @@ func (s *testInstallerSuite) installWithExistingConfigFile() { // Arrange // Act - s.Require().NoError(s.Installer().Install()) + s.Require().NoError(s.Installer().Install( + installerwindows.WithMSILogFile("with-config-install.log"), + )) // Assert s.requireInstalled() @@ -82,7 +84,9 @@ func (s *testInstallerSuite) repair() { s.Require().NoError(s.Env().RemoteHost.Remove(installerwindows.BinaryPath)) // Act - s.Require().NoError(s.Installer().Install()) + s.Require().NoError(s.Installer().Install( + installerwindows.WithMSILogFile("repair.log"), + )) // Assert s.requireInstalled() diff --git a/test/new-e2e/tests/installer/windows/suites/installer-package/rollback_test.go b/test/new-e2e/tests/installer/windows/suites/installer-package/rollback_test.go index 9b2dadb31de29..b211dce84ead1 100644 --- a/test/new-e2e/tests/installer/windows/suites/installer-package/rollback_test.go +++ b/test/new-e2e/tests/installer/windows/suites/installer-package/rollback_test.go @@ -33,7 +33,10 @@ func (s *testInstallerRollbackSuite) installRollback() { // Arrange // Act - msiErr := s.Installer().Install(installerwindows.WithMSIArg("WIXFAILWHENDEFERRED=1")) + msiErr := s.Installer().Install( + installerwindows.WithMSIArg("WIXFAILWHENDEFERRED=1"), + installerwindows.WithMSILogFile("install-rollback.log"), + ) s.Require().Error(msiErr) // Assert @@ -45,7 +48,10 @@ func (s *testInstallerRollbackSuite) uninstallRollback() { // Arrange // Act - msiErr := s.Installer().Uninstall(installerwindows.WithMSIArg("WIXFAILWHENDEFERRED=1")) + msiErr := s.Installer().Uninstall( + installerwindows.WithMSIArg("WIXFAILWHENDEFERRED=1"), + installerwindows.WithMSILogFile("uninstall-rollback.log"), + ) s.Require().Error(msiErr) // Assert diff --git a/test/new-e2e/tests/installer/windows/suites/installer-package/upgrade_test.go b/test/new-e2e/tests/installer/windows/suites/installer-package/upgrade_test.go index e9925be832a49..a2fc1d9970507 100644 --- a/test/new-e2e/tests/installer/windows/suites/installer-package/upgrade_test.go +++ b/test/new-e2e/tests/installer/windows/suites/installer-package/upgrade_test.go @@ -27,7 +27,10 @@ func TestInstallerUpgrades(t *testing.T) { // TestUpgrades tests upgrading the stable version of the Datadog installer to the latest from the pipeline. 
func (s *testInstallerUpgradesSuite) TestUpgrades() { // Arrange - s.Require().NoError(s.Installer().Install(installerwindows.WithInstallerURLFromInstallersJSON(pipeline.AgentS3BucketTesting, pipeline.StableChannel, s.StableInstallerVersion().PackageVersion()))) + s.Require().NoError(s.Installer().Install( + installerwindows.WithInstallerURLFromInstallersJSON(pipeline.StableURL, s.StableInstallerVersion().PackageVersion())), + installerwindows.WithMSILogFile("install.log"), + ) // sanity check: make sure we did indeed install the stable version s.Require().Host(s.Env().RemoteHost). HasBinary(installerwindows.BinaryPath). @@ -36,7 +39,9 @@ func (s *testInstallerUpgradesSuite) TestUpgrades() { // Act // Install "latest" from the pipeline - s.Require().NoError(s.Installer().Install()) + s.Require().NoError(s.Installer().Install( + installerwindows.WithMSILogFile("upgrade.log"), + )) // Assert s.Require().Host(s.Env().RemoteHost). diff --git a/test/new-e2e/tests/npm/common_1host.go b/test/new-e2e/tests/npm/common_1host.go index 7a1fcd5824189..2f0456786a2ba 100644 --- a/test/new-e2e/tests/npm/common_1host.go +++ b/test/new-e2e/tests/npm/common_1host.go @@ -164,6 +164,10 @@ func test1HostFakeIntakeNPMTCPUDPDNS[Env any](v *e2e.BaseSuite[Env], FakeIntake v.EventuallyWithT(func(c *assert.CollectT) { cnx, err := FakeIntake.Client().GetConnections() assert.NoError(c, err, "GetConnections() errors") + if !assert.NotNil(c, cnx, "GetConnections() returned nil ConnectionsAggregator") { + return + } + if !assert.NotEmpty(c, cnx.GetNames(), "no connections yet") { return } diff --git a/test/new-e2e/tests/otel/otel_test.go b/test/new-e2e/tests/otel/otel_test.go index d67c79aefcd90..1a00bf90d9e51 100644 --- a/test/new-e2e/tests/otel/otel_test.go +++ b/test/new-e2e/tests/otel/otel_test.go @@ -38,6 +38,7 @@ type linuxTestSuite struct { var collectorConfig string func TestOTel(t *testing.T) { + t.Parallel() e2e.Run(t, &linuxTestSuite{}, e2e.WithProvisioner(awskubernetes.KindProvisioner(awskubernetes.WithAgentOptions(kubernetesagentparams.WithoutDualShipping(), kubernetesagentparams.WithOTelAgent(), kubernetesagentparams.WithOTelConfig(collectorConfig))))) } diff --git a/test/new-e2e/tests/otel/otlp-ingest/pipelines_test.go b/test/new-e2e/tests/otel/otlp-ingest/pipelines_test.go new file mode 100644 index 0000000000000..f15c74e858220 --- /dev/null +++ b/test/new-e2e/tests/otel/otlp-ingest/pipelines_test.go @@ -0,0 +1,148 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +// Package otel contains end-to-end tests for OTLP ingest in the Datadog Agent + +package otel + +import ( + "context" + _ "embed" + "fmt" + "testing" + "time" + + "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams" + "github.com/stretchr/testify/assert" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/DataDog/datadog-agent/test/fakeintake/aggregator" + fakeintake "github.com/DataDog/datadog-agent/test/fakeintake/client" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + awskubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/kubernetes" +) + +type otelIngestTestSuite struct { + e2e.BaseSuite[environments.Kubernetes] +} + +func TestOTelIngest(t *testing.T) { + values := ` +datadog: + otlp: + receiver: + protocols: + grpc: + enabled: true + logs: + enabled: true +` + t.Parallel() + e2e.Run(t, &otelIngestTestSuite{}, e2e.WithProvisioner(awskubernetes.KindProvisioner(awskubernetes.WithAgentOptions(kubernetesagentparams.WithoutDualShipping(), kubernetesagentparams.WithHelmValues(values))))) +} + +func (s *otelIngestTestSuite) TestOTLPTraces() { + ctx := context.Background() + s.Env().FakeIntake.Client().FlushServerAndResetAggregators() + service := "telemetrygen-job" + numTraces := 10 + + s.T().Log("Starting telemetrygen") + s.createTelemetrygenJob(ctx, "traces", []string{"--service", service, "--traces", fmt.Sprint(numTraces)}) + + s.T().Log("Waiting for traces") + s.EventuallyWithT(func(c *assert.CollectT) { + traces, err := s.Env().FakeIntake.Client().GetTraces() + assert.NoError(c, err) + assert.NotEmpty(c, traces) + trace := traces[0] + assert.Equal(c, "none", trace.Env) + assert.NotEmpty(c, trace.TracerPayloads) + tp := trace.TracerPayloads[0] + assert.NotEmpty(c, tp.Chunks) + assert.NotEmpty(c, tp.Chunks[0].Spans) + spans := tp.Chunks[0].Spans + for _, sp := range spans { + assert.Equal(c, service, sp.Service) + assert.Equal(c, "telemetrygen", sp.Meta["otel.library.name"]) + } + }, 2*time.Minute, 10*time.Second) +} + +func (s *otelIngestTestSuite) TestOTLPMetrics() { + ctx := context.Background() + s.Env().FakeIntake.Client().FlushServerAndResetAggregators() + service := "telemetrygen-job" + serviceAttribute := fmt.Sprintf("service.name=\"%v\"", service) + numMetrics := 10 + + s.T().Log("Starting telemetrygen") + s.createTelemetrygenJob(ctx, "metrics", []string{"--metrics", fmt.Sprint(numMetrics), "--otlp-attributes", serviceAttribute}) + + s.T().Log("Waiting for metrics") + s.EventuallyWithT(func(c *assert.CollectT) { + serviceTag := "service:" + service + metrics, err := s.Env().FakeIntake.Client().FilterMetrics("gen", fakeintake.WithTags[*aggregator.MetricSeries]([]string{serviceTag})) + assert.NoError(c, err) + assert.NotEmpty(c, metrics) + }, 2*time.Minute, 10*time.Second) +} + +func (s *otelIngestTestSuite) TestOTLPLogs() { + ctx := context.Background() + s.Env().FakeIntake.Client().FlushServerAndResetAggregators() + service := "telemetrygen-job" + serviceAttribute := fmt.Sprintf("service.name=\"%v\"", service) + numLogs := 10 + logBody := "telemetrygen log" + + s.T().Log("Starting telemetrygen") + s.createTelemetrygenJob(ctx, "logs", []string{"--logs", fmt.Sprint(numLogs), "--otlp-attributes", serviceAttribute, "--body", logBody}) + + s.T().Log("Waiting for logs") + s.EventuallyWithT(func(c *assert.CollectT) { + logs, err :=
s.Env().FakeIntake.Client().FilterLogs(service) + assert.NoError(c, err) + assert.NotEmpty(c, logs) + for _, log := range logs { + assert.Contains(c, log.Message, logBody) + } + }, 2*time.Minute, 10*time.Second) +} + +func (s *otelIngestTestSuite) createTelemetrygenJob(ctx context.Context, telemetry string, options []string) { + var ttlSecondsAfterFinished int32 = 600 // clean up the job 10 minutes after it finishes + var backOffLimit int32 = 4 + + otlpEndpoint := fmt.Sprintf("%v:4317", s.Env().Agent.LinuxNodeAgent.LabelSelectors["app"]) + jobSpec := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("telemetrygen-job-%v", telemetry), + Namespace: "datadog", + }, + Spec: batchv1.JobSpec{ + TTLSecondsAfterFinished: &ttlSecondsAfterFinished, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "telemetrygen-job", + Image: "ghcr.io/open-telemetry/opentelemetry-collector-contrib/telemetrygen:latest", + Command: append([]string{"/telemetrygen", telemetry, "--otlp-endpoint", otlpEndpoint, "--otlp-insecure"}, options...), + }, + }, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, + BackoffLimit: &backOffLimit, + }, + } + + _, err := s.Env().KubernetesCluster.Client().BatchV1().Jobs("datadog").Create(ctx, jobSpec, metav1.CreateOptions{}) + assert.NoError(s.T(), err, "Could not properly start job") +} diff --git a/test/new-e2e/tests/process/ecs_test.go b/test/new-e2e/tests/process/ecs_test.go index 84ee94ec56d2d..10c42d696a512 100644 --- a/test/new-e2e/tests/process/ecs_test.go +++ b/test/new-e2e/tests/process/ecs_test.go @@ -6,6 +6,7 @@ package process import ( + "fmt" "testing" "time" @@ -25,7 +26,7 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/ecs" ) -type ECSSuite struct { +type ECSEC2Suite struct { e2e.BaseSuite[ecsCPUStressEnv] } @@ -33,7 +34,7 @@ type ecsCPUStressEnv struct { environments.ECS } -func ecsCPUStressProvisioner() e2e.PulumiEnvRunFunc[ecsCPUStressEnv] { +func ecsEC2CPUStressProvisioner(runInCoreAgent bool) e2e.PulumiEnvRunFunc[ecsCPUStressEnv] { return func(ctx *pulumi.Context, env *ecsCPUStressEnv) error { awsEnv, err := aws.NewEnvironment(ctx) if err != nil { @@ -45,6 +46,7 @@ func ecsCPUStressProvisioner() e2e.PulumiEnvRunFunc[ecsCPUStressEnv] { ecs.WithECSLinuxECSOptimizedNodeGroup(), ecs.WithAgentOptions( ecsagentparams.WithAgentServiceEnvVariable("DD_PROCESS_CONFIG_PROCESS_COLLECTION_ENABLED", "true"), + ecsagentparams.WithAgentServiceEnvVariable("DD_PROCESS_CONFIG_RUN_IN_CORE_AGENT_ENABLED", fmt.Sprintf("%t", runInCoreAgent)), ), ecs.WithWorkloadApp(func(e aws.Environment, clusterArn pulumi.StringInput) (*ecsComp.Workload, error) { return cpustress.EcsAppDefinition(e, clusterArn) @@ -59,16 +61,16 @@ func ecsCPUStressProvisioner() e2e.PulumiEnvRunFunc[ecsCPUStressEnv] { } } -func TestECSTestSuite(t *testing.T) { +func TestECSEC2TestSuite(t *testing.T) { t.Parallel() - s := ECSSuite{} + s := ECSEC2Suite{} e2eParams := []e2e.SuiteOption{e2e.WithProvisioner( - e2e.NewTypedPulumiProvisioner("ecsCPUStress", ecsCPUStressProvisioner(), nil))} + e2e.NewTypedPulumiProvisioner("ecsEC2CPUStress", ecsEC2CPUStressProvisioner(false), nil))} e2e.Run(t, &s, e2eParams...)
} -func (s *ECSSuite) TestECSProcessCheck() { +func (s *ECSEC2Suite) TestProcessCheck() { t := s.T() // PROCS-4219 flake.Mark(t) @@ -86,3 +88,28 @@ func (s *ECSSuite) TestECSProcessCheck() { assertProcessCollected(t, payloads, false, "stress-ng-cpu [run]") assertContainersCollected(t, payloads, []string{"stress-ng"}) } + +func (s *ECSEC2Suite) TestProcessCheckInCoreAgent() { + t := s.T() + // PROCS-4219 + flake.Mark(t) + + s.UpdateEnv(e2e.NewTypedPulumiProvisioner("ecsEC2CPUStress", ecsEC2CPUStressProvisioner(true), nil)) + + // Flush fake intake to remove any payloads which may have been sent before the environment was updated + s.Env().FakeIntake.Client().FlushServerAndResetAggregators() + + var payloads []*aggregator.ProcessPayload + assert.EventuallyWithT(t, func(c *assert.CollectT) { + var err error + payloads, err = s.Env().FakeIntake.Client().GetProcesses() + assert.NoError(c, err, "failed to get process payloads from fakeintake") + + // Wait for two payloads, as processes must be detected in two check runs to be returned + assert.GreaterOrEqual(c, len(payloads), 2, "fewer than 2 payloads returned") + }, 2*time.Minute, 10*time.Second) + + assertProcessCollected(t, payloads, false, "stress-ng-cpu [run]") + requireProcessNotCollected(t, payloads, "process-agent") + assertContainersCollected(t, payloads, []string{"stress-ng"}) +} diff --git a/test/new-e2e/tests/security-agent-functional/security_agent_test.go b/test/new-e2e/tests/security-agent-functional/security_agent_test.go index 6606c33a6d600..94da861eb7fa6 100644 --- a/test/new-e2e/tests/security-agent-functional/security_agent_test.go +++ b/test/new-e2e/tests/security-agent-functional/security_agent_test.go @@ -21,7 +21,6 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows" - windowsCommon "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common" windowsAgent "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/agent" ) @@ -76,13 +75,7 @@ func (v *vmSuite) TestSystemProbeCWSSuite() { // install the agent (just so we can get the driver(s) installed) agentPackage, err := windowsAgent.GetPackageFromEnv() require.NoError(t, err) - remoteMSIPath, err := windowsCommon.GetTemporaryFile(vm) - require.NoError(t, err) - t.Logf("Getting install package %s...", agentPackage.URL) - err = windowsCommon.PutOrDownloadFile(vm, agentPackage.URL, remoteMSIPath) - require.NoError(t, err) - - err = windowsCommon.InstallMSI(vm, remoteMSIPath, "", "") + _, err = windowsAgent.InstallAgent(vm, windowsAgent.WithPackage(agentPackage)) t.Log("Install complete") require.NoError(t, err) diff --git a/test/new-e2e/tests/sysprobe-functional/apmtags_test.go b/test/new-e2e/tests/sysprobe-functional/apmtags_test.go index 4f6f26623170f..fad68dd02267d 100644 --- a/test/new-e2e/tests/sysprobe-functional/apmtags_test.go +++ b/test/new-e2e/tests/sysprobe-functional/apmtags_test.go @@ -59,8 +59,9 @@ type usmTaggingTest struct { clientJSONFile string clientAppConfig string - defaultFiles usmTaggingFiles - siteFiles map[string]usmTaggingFiles + defaultFiles usmTaggingFiles + siteFiles map[string]usmTaggingFiles + clientEnvVars map[string]string appFiles map[string]usmTaggingFiles @@ -217,7 +218,7 @@ func setupTest(vm *components.RemoteHost, test usmTaggingTest) error { testRoot := path.Join("c:", "users", "administrator") clientJSONFile := path.Join(testRoot, "datadog.json") - clientAppConfig := path.Join(testRoot,
"app.config") + clientAppConfig := path.Join(testRoot, "littleget.exe.config") removeIfExists(vm, clientJSONFile) removeIfExists(vm, clientAppConfig) @@ -295,7 +296,10 @@ func (v *apmvmSuite) TestUSMAutoTaggingSuite() { testExe := path.Join("c:", "users", "administrator", "littleget.exe") vm.CopyFile("usmtest/littleget.exe", testExe) - pscommand := "%s -TargetHost localhost -TargetPort %s -TargetPath %s -ExpectedClientTags %s -ExpectedServerTags %s -ConnExe %s" + pipeExe := path.Join("c:", "users", "administrator", "NamedPipeCmd.exe") + vm.CopyFile("usmtest/NamedPipeCmd.exe", pipeExe) + + pscommand := "%s %s -TargetHost localhost -TargetPort %s -TargetPath %s -ExpectedClientTags %s -ExpectedServerTags %s -ConnExe %s" for _, test := range usmTaggingTests { v.Run(test.name, func() { @@ -315,7 +319,19 @@ func (v *apmvmSuite) TestUSMAutoTaggingSuite() { if test.targetPath != "" { targetpath = test.targetPath } - localcmd := fmt.Sprintf(pscommand, testScript, targetport, targetpath, strings.Join(test.expectedClientTags, ","), strings.Join(test.expectedServerTags, ","), testExe) + var envstring string + for k, v := range test.clientEnvVars { + envstring += fmt.Sprintf("$Env:%s=\"%s\" ; ", k, v) + } + localcmd := fmt.Sprintf(pscommand, envstring, testScript, targetport, targetpath, strings.Join(test.expectedClientTags, ","), strings.Join(test.expectedServerTags, ","), testExe) + + if len(test.clientEnvVars) > 0 { + var envarg string + for k, v := range test.clientEnvVars { + envarg += fmt.Sprintf("%s=%s", k, v) + } + } + out, err := vm.Execute(localcmd) if err != nil { t.Logf("Error running test: %v", out) diff --git a/test/new-e2e/tests/sysprobe-functional/apmtags_testdefs_test.go b/test/new-e2e/tests/sysprobe-functional/apmtags_testdefs_test.go index 817f59726f28e..d190fa1e8f297 100644 --- a/test/new-e2e/tests/sysprobe-functional/apmtags_testdefs_test.go +++ b/test/new-e2e/tests/sysprobe-functional/apmtags_testdefs_test.go @@ -31,6 +31,36 @@ var usmTaggingTests = []usmTaggingTest{ "'http.iis.sitename:Default Web Site'", }, }, + { + name: "all values server from json client from environment", + description: "Tests that for executables (not IIS), that settings are read from the environment", + clientJSONFile: "usmtest/client_all.json", + clientAppConfig: "", + defaultFiles: usmTaggingFiles{ + jsonFile: "usmtest/defaultsite_all.json", + appConfigFile: "", + }, + clientEnvVars: map[string]string{ + "DD_SERVICE": "webclient_env", + "DD_ENV": "testing_env_env", + "DD_VERSION": "1_env", + }, + serverSiteName: "", // empty is default site + serverSitePort: "80", + expectedClientTags: []string{ + "service:webclient_env", + "env:testing_env_env", + "version:1_env", + }, + expectedServerTags: []string{ + "service:defaultsite_json", + "env:testing_env_json", + "version:1_json", + "http.iis.site:1", + "http.iis.app_pool:DefaultAppPool", + "'http.iis.sitename:Default Web Site'", + }, + }, { name: "all values xml test 1", description: "Test with both json and app config provided, xml supercedes json", diff --git a/test/new-e2e/tests/sysprobe-functional/sysprobe_test.go b/test/new-e2e/tests/sysprobe-functional/sysprobe_test.go index 190e2cfffa937..4e41c685bbd7d 100644 --- a/test/new-e2e/tests/sysprobe-functional/sysprobe_test.go +++ b/test/new-e2e/tests/sysprobe-functional/sysprobe_test.go @@ -18,7 +18,6 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" 
"github.com/DataDog/datadog-agent/test/new-e2e/tests/windows" - windowsCommon "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common" windowsAgent "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/agent" componentsos "github.com/DataDog/test-infra-definitions/components/os" "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" @@ -106,13 +105,7 @@ func (v *vmSuite) TestSystemProbeNPMSuite() { // install the agent (just so we can get the driver(s) installed) agentPackage, err := windowsAgent.GetPackageFromEnv() require.NoError(t, err) - remoteMSIPath, err := windowsCommon.GetTemporaryFile(vm) - require.NoError(t, err) - t.Logf("Getting install package %s...", agentPackage.URL) - err = windowsCommon.PutOrDownloadFile(vm, agentPackage.URL, remoteMSIPath) - require.NoError(t, err) - - err = windowsCommon.InstallMSI(vm, remoteMSIPath, "", "") + _, err = windowsAgent.InstallAgent(vm, windowsAgent.WithPackage(agentPackage)) t.Log("Install complete") require.NoError(t, err) diff --git a/test/new-e2e/tests/sysprobe-functional/usmtest/NamedPipeCmd.exe b/test/new-e2e/tests/sysprobe-functional/usmtest/NamedPipeCmd.exe new file mode 100644 index 0000000000000..7cd8ad103db82 Binary files /dev/null and b/test/new-e2e/tests/sysprobe-functional/usmtest/NamedPipeCmd.exe differ diff --git a/test/new-e2e/tests/sysprobe-functional/usmtest/test_tags.ps1 b/test/new-e2e/tests/sysprobe-functional/usmtest/test_tags.ps1 index c0ec58435d3ce..e6858296cdde5 100644 --- a/test/new-e2e/tests/sysprobe-functional/usmtest/test_tags.ps1 +++ b/test/new-e2e/tests/sysprobe-functional/usmtest/test_tags.ps1 @@ -30,7 +30,11 @@ function make-connectionrequest { } function get-connectionsendpoint { - $payload = ((iwr -UseBasicParsing -DisableKeepAlive http://localhost:3333/network_tracer/connections).content | ConvertFrom-Json) + $payload = (.\NamedPipeCmd.exe -method GET -path /network_tracer/connections -quiet) | convertfrom-json + if (! 
$?){ + Write-Host -ForegroundColor Red "Failed to get connection list" + exit 1 + } return $payload } diff --git a/test/new-e2e/tests/windows/common/agent/agent.go b/test/new-e2e/tests/windows/common/agent/agent.go index 693fa93db26fd..8833fb0f3cb6e 100644 --- a/test/new-e2e/tests/windows/common/agent/agent.go +++ b/test/new-e2e/tests/windows/common/agent/agent.go @@ -12,6 +12,7 @@ import ( "path/filepath" "strings" "testing" + "time" "github.com/DataDog/datadog-agent/pkg/version" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" @@ -20,6 +21,7 @@ import ( windowsCommon "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common" infraCommon "github.com/DataDog/test-infra-definitions/common" + "github.com/cenkalti/backoff/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -72,13 +74,23 @@ func InstallAgent(host *components.RemoteHost, options ...InstallAgentOption) (s p.LocalInstallLogFile = filepath.Join(os.TempDir(), "install.log") } + downloadBackOff := p.DownloadMSIBackOff + if downloadBackOff == nil { + // 5s, 7s, 11s, 17s, 25s, 38s, 60s, 60s...for up to 5 minutes + downloadBackOff = backoff.NewExponentialBackOff( + backoff.WithInitialInterval(5*time.Second), + backoff.WithMaxInterval(60*time.Second), + backoff.WithMaxElapsedTime(5*time.Minute), + ) + } + args := p.toArgs() remoteMSIPath, err := windowsCommon.GetTemporaryFile(host) if err != nil { return "", err } - err = windowsCommon.PutOrDownloadFile(host, p.Package.URL, remoteMSIPath) + err = windowsCommon.PutOrDownloadFileWithRetry(host, p.Package.URL, remoteMSIPath, downloadBackOff) if err != nil { return "", err } diff --git a/test/new-e2e/tests/windows/common/agent/agent_install_params.go b/test/new-e2e/tests/windows/common/agent/agent_install_params.go index 51f4a0b951057..a4f37403d27c4 100644 --- a/test/new-e2e/tests/windows/common/agent/agent_install_params.go +++ b/test/new-e2e/tests/windows/common/agent/agent_install_params.go @@ -13,11 +13,15 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters" "github.com/DataDog/test-infra-definitions/components/datadog/agentparams/msi" + + "github.com/cenkalti/backoff/v4" ) // InstallAgentParams are the parameters used for installing the Agent using msiexec. type InstallAgentParams struct { - Package *Package + Package *Package + DownloadMSIBackOff backoff.BackOff + // Path on local test runner to save the MSI install log LocalInstallLogFile string @@ -146,6 +150,14 @@ func WithLastStablePackage() InstallAgentOption { } } +// WithDownloadMSIBackoff specifies the backoff strategy for downloading the MSI. +func WithDownloadMSIBackoff(backoff backoff.BackOff) InstallAgentOption { + return func(i *InstallAgentParams) error { + i.DownloadMSIBackOff = backoff + return nil + } +} + // WithFakeIntake configures the Agent to use a fake intake URL. 
func WithFakeIntake(fakeIntake *components.FakeIntake) InstallAgentOption { return func(i *InstallAgentParams) error { diff --git a/test/new-e2e/tests/windows/common/agent/package.go b/test/new-e2e/tests/windows/common/agent/package.go index 5e83cfdc9c02b..8e3f867f41a80 100644 --- a/test/new-e2e/tests/windows/common/agent/package.go +++ b/test/new-e2e/tests/windows/common/agent/package.go @@ -7,7 +7,6 @@ package agent import ( - "context" "fmt" "os" "strings" @@ -15,9 +14,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/version" "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/agent/installers/v2" - "github.com/aws/aws-sdk-go-v2/aws" - awsConfig "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/pipeline" ) const ( @@ -25,7 +22,6 @@ const ( defaultArch = "x86_64" agentInstallerListProductName = "datadog-agent" agentS3BucketRelease = "ddagent-windows-stable" - agentS3BucketTesting = "dd-agent-mstesting" betaChannel = "beta" betaURL = "https://s3.amazonaws.com/dd-agent-mstesting/builds/beta/installers_v2.json" stableChannel = "stable" @@ -108,53 +104,34 @@ func GetLatestMSIURL(majorVersion string, arch string) string { // majorVersion: 6, 7 // arch: x86_64 func GetPipelineMSIURL(pipelineID string, majorVersion string, arch string) (string, error) { - // dd-agent-mstesting is a public bucket so we can use anonymous credentials - config, err := awsConfig.LoadDefaultConfig(context.Background(), awsConfig.WithCredentialsProvider(aws.AnonymousCredentials{})) - if err != nil { - return "", err - } - - s3Client := s3.NewFromConfig(config) - // Manual URL example: https://s3.amazonaws.com/dd-agent-mstesting?prefix=pipelines/A7/25309493 fmt.Printf("Looking for agent MSI for pipeline majorVersion %v %v\n", majorVersion, pipelineID) - result, err := s3Client.ListObjectsV2(context.Background(), &s3.ListObjectsV2Input{ - Bucket: aws.String(agentS3BucketTesting), - Prefix: aws.String(fmt.Sprintf("pipelines/A%s/%s", majorVersion, pipelineID)), - }) - - if err != nil { - return "", err - } - - if len(result.Contents) <= 0 { - return "", fmt.Errorf("no agent MSI found for pipeline %v", pipelineID) - } - - // In case there are multiple artifacts, try to match the right one - // This is only here as a workaround for a CI issue that can cause artifacts - // from different pipelines to be mixed together. This should be removed once - // the issue is resolved. - // TODO: CIREL-1970 - for _, obj := range result.Contents { + artifactURL, err := pipeline.GetPipelineArtifact(pipelineID, pipeline.AgentS3BucketTesting, majorVersion, func(artifact string) bool { + // In case there are multiple artifacts, try to match the right one + // This is only here as a workaround for a CI issue that can cause artifacts + // from different pipelines to be mixed together. This should be removed once + // the issue is resolved. 
+ // TODO: CIREL-1970 // Example: datadog-agent-7.52.0-1-x86_64.msi // Example: datadog-agent-7.53.0-devel.git.512.41b1225.pipeline.30353507-1-x86_64.msi - if !strings.Contains(*obj.Key, fmt.Sprintf("datadog-agent-%s", majorVersion)) { - continue + if !strings.Contains(artifact, fmt.Sprintf("datadog-agent-%s", majorVersion)) { + return false } // Not all pipelines include the pipeline ID in the artifact name, but if it is there then match against it - if strings.Contains(*obj.Key, "pipeline.") && - !strings.Contains(*obj.Key, fmt.Sprintf("pipeline.%s", pipelineID)) { - continue + if strings.Contains(artifact, "pipeline.") && + !strings.Contains(artifact, fmt.Sprintf("pipeline.%s", pipelineID)) { + return false } - if !strings.Contains(*obj.Key, fmt.Sprintf("-%s.msi", arch)) { - continue + if !strings.Contains(artifact, fmt.Sprintf("-%s.msi", arch)) { + return false } - - return fmt.Sprintf("https://s3.amazonaws.com/%s/%s", agentS3BucketTesting, *obj.Key), nil + // match! + return true + }) + if err != nil { + return "", fmt.Errorf("no agent MSI found for pipeline %v and arch %v: %w", pipelineID, arch, err) } - - return "", fmt.Errorf("no agent MSI found for pipeline %v and arch %v", pipelineID, arch) + return artifactURL, nil } // LookupChannelFromEnv looks at environment variabes to select the agent channel, if the value diff --git a/test/new-e2e/tests/windows/common/network.go b/test/new-e2e/tests/windows/common/network.go index af4291c732008..2a8e98f0266ca 100644 --- a/test/new-e2e/tests/windows/common/network.go +++ b/test/new-e2e/tests/windows/common/network.go @@ -11,6 +11,8 @@ import ( "strings" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" + + "github.com/cenkalti/backoff/v4" ) // BoundPort represents a port that is bound to a process @@ -80,8 +82,27 @@ func ListBoundPorts(host *components.RemoteHost) ([]*BoundPort, error) { // If the URL is a local file, it will be uploaded to the VM. // If the URL is a remote file, it will be downloaded from the VM func PutOrDownloadFile(host *components.RemoteHost, url string, destination string) error { + // no retry + return PutOrDownloadFileWithRetry(host, url, destination, &backoff.StopBackOff{}) +} + +// PutOrDownloadFileWithRetry is similar to PutOrDownloadFile but retries on download failure, +// local file copy is not retried. +func PutOrDownloadFileWithRetry(host *components.RemoteHost, url string, destination string, b backoff.BackOff) error { if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") { - return DownloadFile(host, url, destination) + err := backoff.Retry(func() error { + return DownloadFile(host, url, destination) + // TODO: it would be neat to only retry on web related errors but + // we don't have a way to distinguish them since DownloadFile + // throws a WebException for non web related errors such as + // filename is null or Empty. 
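[Editor's aside — sketch, not part of the patch] Picking up the TODO above: `backoff.Retry` already supports short-circuiting on non-retryable failures via `backoff.Permanent`, so the missing piece is only the error classification. A minimal, self-contained sketch of that pattern follows; the download stand-in, the `isRetryable` rule, and the 503/404 errors are made up for illustration and are not behavior taken from this patch.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

var errNotFound = errors.New("404 not found") // example of a permanent failure

// download is a stand-in for the real DownloadFile helper: it fails once with
// a transient error, then succeeds.
func download(attempt int) error {
	if attempt == 0 {
		return errors.New("503 service unavailable")
	}
	return nil
}

// isRetryable is a made-up classification rule; a real version would need
// DownloadFile to surface the HTTP status, which the TODO notes it currently
// does not.
func isRetryable(err error) bool {
	return !errors.Is(err, errNotFound)
}

func main() {
	attempt := 0
	err := backoff.Retry(func() error {
		err := download(attempt)
		attempt++
		if err != nil && !isRetryable(err) {
			// Wrapping in backoff.Permanent stops the retry loop immediately.
			return backoff.Permanent(err)
		}
		return err
	}, backoff.NewExponentialBackOff())
	fmt.Println("result:", err, "attempts:", attempt)
}
```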
+ // https://learn.microsoft.com/en-us/dotnet/api/system.net.webclient.downloadfile + // example error: Exception calling "DownloadFile" with "2" argument(s): "The remote server returned an error: (503) + }, b) + if err != nil { + return err + } + return nil } if strings.HasPrefix(url, "file://") { diff --git a/test/new-e2e/tests/windows/install-test/installtester.go b/test/new-e2e/tests/windows/install-test/installtester.go index 95a5c15785b52..8e8f74cdf0cb6 100644 --- a/test/new-e2e/tests/windows/install-test/installtester.go +++ b/test/new-e2e/tests/windows/install-test/installtester.go @@ -164,17 +164,11 @@ func (t *Tester) runTestsForKitchenCompat(tt *testing.T) { common.CheckIntegrationInstall(tt, t.InstallTestClient) tt.Run("default python version", func(tt *testing.T) { - pythonVersion, err := t.InstallTestClient.GetPythonVersion() - if !assert.NoError(tt, err, "should get python version") { - return - } - majorPythonVersion := strings.Split(pythonVersion, ".")[0] - + expected := common.ExpectedPythonVersion3 if t.ExpectPython2Installed() { - assert.Equal(tt, "2", majorPythonVersion, "Agent 6 should install Python 2") - } else { - assert.Equal(tt, "3", majorPythonVersion, "Agent should install Python 3") + expected = common.ExpectedPythonVersion2 } + common.CheckAgentPython(tt, t.InstallTestClient, expected) }) if t.ExpectPython2Installed() { diff --git a/test/new-e2e/tests/windows/service-test/fixtures/system-probe-nofim.yaml b/test/new-e2e/tests/windows/service-test/fixtures/system-probe-nofim.yaml new file mode 100644 index 0000000000000..bbfbac7b00d97 --- /dev/null +++ b/test/new-e2e/tests/windows/service-test/fixtures/system-probe-nofim.yaml @@ -0,0 +1,8 @@ +# enable NPM +network_config: + enabled: true + +# enable security agent +runtime_security_config: + enabled: true + fim_enabled: false diff --git a/test/new-e2e/tests/windows/service-test/startstop_test.go b/test/new-e2e/tests/windows/service-test/startstop_test.go index 14bd97dbabfbd..8d7a8f6dacb87 100644 --- a/test/new-e2e/tests/windows/service-test/startstop_test.go +++ b/test/new-e2e/tests/windows/service-test/startstop_test.go @@ -36,13 +36,28 @@ var agentConfig string //go:embed fixtures/system-probe.yaml var systemProbeConfig string +//go:embed fixtures/system-probe-nofim.yaml +var systemProbeNoFIMConfig string + //go:embed fixtures/security-agent.yaml var securityAgentConfig string +// TestServiceBehaviorAgentCommandNoFIM tests the service behavior when controlled by Agent commands +func TestNoFIMServiceBehaviorAgentCommand(t *testing.T) { + s := &agentServiceCommandSuite{} + run(t, s, systemProbeNoFIMConfig) +} + +// TestServiceBehaviorPowerShellNoFIM tests the service behavior when controlled by PowerShell commands +func TestNoFIMServiceBehaviorPowerShell(t *testing.T) { + s := &powerShellServiceCommandSuite{} + run(t, s, systemProbeNoFIMConfig) +} + // TestServiceBehaviorAgentCommand tests the service behavior when controlled by Agent commands func TestServiceBehaviorAgentCommand(t *testing.T) { s := &agentServiceCommandSuite{} - run(t, s) + run(t, s, systemProbeConfig) } type agentServiceCommandSuite struct { @@ -78,7 +93,7 @@ func (s *agentServiceCommandSuite) SetupSuite() { // TestServiceBehaviorAgentCommand tests the service behavior when controlled by PowerShell commands func TestServiceBehaviorPowerShell(t *testing.T) { s := &powerShellServiceCommandSuite{} - run(t, s) + run(t, s, systemProbeConfig) } type powerShellServiceCommandSuite struct { @@ -204,7 +219,7 @@ func (s *powerShellServiceCommandSuite) 
TestHardExitEventLogEntry() { }, 1*time.Minute, 1*time.Second, "should have hard exit messages in the event log") } -func run[Env any](t *testing.T, s e2e.Suite[Env]) { +func run[Env any](t *testing.T, s e2e.Suite[Env], systemProbeConfig string) { opts := []e2e.SuiteOption{e2e.WithProvisioner(awsHostWindows.ProvisionerNoFakeIntake( awsHostWindows.WithAgentOptions( agentparams.WithAgentConfig(agentConfig), diff --git a/test/otel/go.mod b/test/otel/go.mod index 1a1e56a250a8a..69222235a4400 100644 --- a/test/otel/go.mod +++ b/test/otel/go.mod @@ -36,7 +36,10 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/env => ./../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/mock => ./../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ./../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ./../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../pkg/config/structure + github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ./../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/logs/auditor => ./../../pkg/logs/auditor github.com/DataDog/datadog-agent/pkg/logs/client => ./../../pkg/logs/client @@ -98,8 +101,8 @@ require ( github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor v0.56.0-rc.1 - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 ) @@ -108,7 +111,7 @@ require ( github.com/DataDog/agent-payload/v5 v5.0.119 // indirect github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect @@ -121,9 +124,12 @@ require ( github.com/DataDog/datadog-agent/comp/trace/compression/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/aggregator/ckey v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // 
indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 // indirect @@ -151,33 +157,33 @@ require ( github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/cgroups v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect github.com/DataDog/datadog-go/v5 v5.5.0 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect - github.com/DataDog/go-sqllexer v0.0.14 // indirect + github.com/DataDog/go-sqllexer v0.0.15 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.16.1 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs 
v0.20.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/DataDog/zstd v1.5.5 // indirect @@ -293,12 +299,12 @@ require ( go.uber.org/fx v1.22.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/net v0.28.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/net v0.29.0 // indirect golang.org/x/oauth2 v0.20.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.6.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect google.golang.org/grpc v1.64.0 // indirect diff --git a/test/otel/go.sum b/test/otel/go.sum index cdf2e4f5f136d..1b9f4ba2789ad 100644 --- a/test/otel/go.sum +++ b/test/otel/go.sum @@ -8,24 +8,24 @@ github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42/go.mod h1:TX7CTOQ3LbQjfAi4SwqUoR5gY1zfUk7VRBDTuArjaDc= -github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q= -github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.15 h1:rUUu52dP8EQhJLnUw0MIAxZp0BQx2fOTuMztr3vtHUU= +github.com/DataDog/go-sqllexer v0.0.15/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 h1:weAPKDi/dTlBbWU4oDZ55ubomqUob6OWPoUcdBjWM2M= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0/go.mod h1:VrcmO2+HTWXaGYin1pAAXWNEtaza/DCJDH/+t5IY5rs= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.18.0 h1:KNiq6ofE5BBMQjl7w9fftg8z44C9z51w7qOWIKs5SCg= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.18.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 h1:FaUFQE8IuaNdpOQGIhoy2h58v8AVND+yZG3gVqKAwLQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.16.1 h1:px2+7svK86oeCGd+sT1x/9f0pqIJdApGFnWI0AOPXwA= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.16.1/go.mod h1:+LijQ2LdlocAQ4WB+7KsoIGe90bfogkRslubd9swVow= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics 
v0.18.0 h1:Fija8Qo0z/HngskYyBpMqmJKM2ejNr1NfXUyWszFDAw= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0/go.mod h1:lNu6vfFNCV/tyWxs8x8nCN1TqK+bPeI2dbnlwFTs8VA= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0 h1:x6re32f8gQ8fdCllywQyAbxQuXNrgxeimpLBfvwA97g= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0/go.mod h1:R84ZVbxKSgMxzvJro/MftVrlkGm2C2gndUhV35wyR8A= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 h1:jdsuH8u4rxfvy3ZHoSLk5NAZrQMNZqyJwhM15FpEswE= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0/go.mod h1:KI5I5JhJNOQWeE4vs+qk+BY/9PVSDwNmSjrCUrmuZKw= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0 h1:e4XT2+v4vgZBCbp5JUbe0Z+PRegh+nsLMp4X+esht9E= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 h1:JLpKc1QpkaUXEFgN68/Q9XgF0XgbVl/IXd8S1KUcEV4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0/go.mod h1:VJtgUHCz38obs58oEjNjEre6IaHmR+s7o4DvX74knq4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 h1:b60rxWT/EwcSA4l/zXfqTZp3udXJ1fKtz7+Qwat8OjQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0/go.mod h1:6jM34grB+zhMrzWgM0V8B6vyIJ/75oAfjcx/mJWv6cE= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 h1:0OFAPO964qsj6BzKs/hbAkpO/IIHp7vN1klKrohzULA= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0/go.mod h1:IDaKpBfDtw8eWBLtXR14HB5dsXDxS4VRUR0OL5rlRT8= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= @@ -470,11 +470,11 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint 
v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -497,8 +497,8 @@ golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -530,15 +530,15 @@ golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= diff --git a/test/regression/cases/idle/lading/lading.yaml b/test/regression/cases/idle/lading/lading.yaml index 0ae7c415523d8..5e2eb2566ef45 100644 --- a/test/regression/cases/idle/lading/lading.yaml +++ b/test/regression/cases/idle/lading/lading.yaml @@ -7,5 +7,170 @@ blackhole: binding_addr: "127.0.0.1:9092" target_metrics: - - prometheus: + - prometheus: #core agent telemetry uri: "http://127.0.0.1:5000/telemetry" + tags: + sub_agent: "core" + - 
prometheus: #process agent telemetry + uri: "http://127.0.0.1:6062/telemetry" + tags: + sub_agent: "process" + - expvar: #trace agent telemetry + uri: "http://127.0.0.1:5012/debug/vars" + vars: + - "/Event" + - "/ServiceCheck" + - "/check_run_v1" + - "/cmdline" + - "/compressor/BytesIn" + - "/compressor/BytesOut" + - "/compressor/TotalCompressCycles" + - "/compressor/TotalPayloads" + - "/connections" + - "/container" + - "/events_v2" + - "/forwarder/APIKeyFailure" + - "/forwarder/APIKeyStatus" + - "/forwarder/FileStorage/CurrentSizeInBytes" + - "/forwarder/FileStorage/DeserializeCount" + - "/forwarder/FileStorage/DeserializeErrorsCount" + - "/forwarder/FileStorage/DeserializeTransactionsCount" + - "/forwarder/FileStorage/FileSize" + - "/forwarder/FileStorage/FilesCount" + - "/forwarder/FileStorage/FilesRemovedCount" + - "/forwarder/FileStorage/PointsDroppedCount" + - "/forwarder/FileStorage/SerializeCount" + - "/forwarder/FileStorage/StartupReloadedRetryFilesCount" + - "/forwarder/RemovalPolicy/FilesFromUnknownDomainCount" + - "/forwarder/RemovalPolicy/NewRemovalPolicyCount" + - "/forwarder/RemovalPolicy/OutdatedFilesCount" + - "/forwarder/RemovalPolicy/RegisteredDomainCount" + - "/forwarder/TransactionContainer/CurrentMemSizeInBytes" + - "/forwarder/TransactionContainer/ErrorsCount" + - "/forwarder/TransactionContainer/PointsDroppedCount" + - "/forwarder/TransactionContainer/TransactionsCount" + - "/forwarder/TransactionContainer/TransactionsDroppedCount" + - "/forwarder/Transactions/Cluster" + - "/forwarder/Transactions/ClusterRole" + - "/forwarder/Transactions/ClusterRoleBinding" + - "/forwarder/Transactions/ConnectionEvents/ConnectSuccess" + - "/forwarder/Transactions/ConnectionEvents/DNSSuccess" + - "/forwarder/Transactions/CronJob" + - "/forwarder/Transactions/CustomResource" + - "/forwarder/Transactions/CustomResourceDefinition" + - "/forwarder/Transactions/DaemonSet" + - "/forwarder/Transactions/Deployment" + - "/forwarder/Transactions/Dropped" + - "/forwarder/Transactions/DroppedByEndpoint" + - "/forwarder/Transactions/ECSTask" + - "/forwarder/Transactions/Errors" + - "/forwarder/Transactions/ErrorsByType/ConnectionErrors" + - "/forwarder/Transactions/ErrorsByType/DNSErrors" + - "/forwarder/Transactions/ErrorsByType/SentRequestErrors" + - "/forwarder/Transactions/ErrorsByType/TLSErrors" + - "/forwarder/Transactions/ErrorsByType/WroteRequestErrors" + - "/forwarder/Transactions/HTTPErrors" + - "/forwarder/Transactions/HTTPErrorsByCode" + - "/forwarder/Transactions/HighPriorityQueueFull" + - "/forwarder/Transactions/HorizontalPodAutoscaler" + - "/forwarder/Transactions/Ingress" + - "/forwarder/Transactions/InputBytesByEndpoint" + - "/forwarder/Transactions/InputCountByEndpoint" + - "/forwarder/Transactions/Job" + - "/forwarder/Transactions/LimitRange" + - "/forwarder/Transactions/Namespace" + - "/forwarder/Transactions/NetworkPolicy" + - "/forwarder/Transactions/Node" + - "/forwarder/Transactions/OrchestratorManifest" + - "/forwarder/Transactions/PersistentVolume" + - "/forwarder/Transactions/PersistentVolumeClaim" + - "/forwarder/Transactions/Pod" + - "/forwarder/Transactions/ReplicaSet" + - "/forwarder/Transactions/Requeued" + - "/forwarder/Transactions/RequeuedByEndpoint" + - "/forwarder/Transactions/Retried" + - "/forwarder/Transactions/RetriedByEndpoint" + - "/forwarder/Transactions/RetryQueueSize" + - "/forwarder/Transactions/Role" + - "/forwarder/Transactions/RoleBinding" + - "/forwarder/Transactions/Service" + - "/forwarder/Transactions/ServiceAccount" + - 
"/forwarder/Transactions/StatefulSet" + - "/forwarder/Transactions/StorageClass" + - "/forwarder/Transactions/Success" + - "/forwarder/Transactions/SuccessByEndpoint/check_run_v1" + - "/forwarder/Transactions/SuccessByEndpoint/connections" + - "/forwarder/Transactions/SuccessByEndpoint/container" + - "/forwarder/Transactions/SuccessByEndpoint/events_v2" + - "/forwarder/Transactions/SuccessByEndpoint/host_metadata_v2" + - "/forwarder/Transactions/SuccessByEndpoint/intake" + - "/forwarder/Transactions/SuccessByEndpoint/orchestrator" + - "/forwarder/Transactions/SuccessByEndpoint/process" + - "/forwarder/Transactions/SuccessByEndpoint/rtcontainer" + - "/forwarder/Transactions/SuccessByEndpoint/rtprocess" + - "/forwarder/Transactions/SuccessByEndpoint/series_v1" + - "/forwarder/Transactions/SuccessByEndpoint/series_v2" + - "/forwarder/Transactions/SuccessByEndpoint/services_checks_v2" + - "/forwarder/Transactions/SuccessByEndpoint/sketches_v1" + - "/forwarder/Transactions/SuccessByEndpoint/sketches_v2" + - "/forwarder/Transactions/SuccessByEndpoint/validate_v1" + - "/forwarder/Transactions/SuccessBytesByEndpoint" + - "/forwarder/Transactions/VerticalPodAutoscaler" + - "/host_metadata_v2" + - "/hostname/errors" + - "/hostname/provider" + - "/intake" + - "/jsonstream/CompressorLocks" + - "/jsonstream/ItemDrops" + - "/jsonstream/PayloadFulls" + - "/jsonstream/TotalCalls" + - "/jsonstream/TotalItems" + - "/jsonstream/TotalLockTime" + - "/jsonstream/TotalSerializationTime" + - "/jsonstream/WriteItemErrors" + - "/kubeletQueries" + - "/orchestrator" + - "/pid" + - "/process" + - "/rtcontainer" + - "/rtprocess" + - "/serializer/SendEventsErrItemTooBigs" + - "/serializer/SendEventsErrItemTooBigsFallback" + - "/series" + - "/series_v1" + - "/series_v2" + - "/services_checks_v2" + - "/sketch_series/ItemTooBig" + - "/sketch_series/PayloadFull" + - "/sketch_series/UnexpectedItemDrops" + - "/sketches_v1" + - "/sketches_v2" + - "/splitter/NotTooBig" + - "/splitter/PayloadDrops" + - "/splitter/TooBig" + - "/splitter/TotalLoops" + - "/stats_writer/Bytes" + - "/stats_writer/ClientPayloads" + - "/stats_writer/Errors" + - "/stats_writer/Payloads" + - "/stats_writer/Retries" + - "/stats_writer/Splits" + - "/stats_writer/StatsBuckets" + - "/stats_writer/StatsEntries" + - "/trace_writer/Bytes" + - "/trace_writer/BytesUncompressed" + - "/trace_writer/Errors" + - "/trace_writer/Events" + - "/trace_writer/Payloads" + - "/trace_writer/Retries" + - "/trace_writer/SingleMaxSize" + - "/trace_writer/Spans" + - "/trace_writer/Traces" + - "/uptime" + - "/validate_v1" + - "/version/Version" + - "/version/GitCommit" + - "/watchdog/CPU/UserAvg" + - "/watchdog/Mem/Alloc" + tags: + sub_agent: "trace" diff --git a/test/regression/cases/idle_all_features/datadog-agent/datadog.yaml b/test/regression/cases/idle_all_features/datadog-agent/datadog.yaml new file mode 100644 index 0000000000000..1960b9b64d8f0 --- /dev/null +++ b/test/regression/cases/idle_all_features/datadog-agent/datadog.yaml @@ -0,0 +1,74 @@ +auth_token_file_path: /tmp/agent-auth-token +hostname: smp-regression + +dd_url: http://127.0.0.1:9092 + +confd_path: /etc/datadog-agent/conf.d + +# Disable cloud detection. This stops the Agent from poking around the +# execution environment & network. This is particularly important if the target +# has network access. 
+cloud_provider_metadata: [] + +dogstatsd_socket: '/tmp/dsd.socket' + +logs_enabled: true + +apm_config: + enabled: true + +process_config: + process_collection: + enabled: true + container_collection: + enabled: true + +network_path: + connections_monitoring: + enabled: true + +runtime_security_config: + ## Set to true to enable Threat Detection + enabled: true + +cluster_checks: + enabled: true + +otlp_config: + metrics: + enabled: true + traces: + enabled: true + logs: + enabled: true + +system_probe_config: + enabled: true + +network_config: + enabled: true + +# Per Cloud Security Management setup documentation +# https://docs.datadoghq.com/security/cloud_security_management/setup/agent/linux/ +remote_configuration: + # SMP environment does not support remote config currently. + enabled: false + +compliance_config: + ## Set to true to enable CIS benchmarks for Misconfigurations. + enabled: true + host_benchmarks: + enabled: true + +# Vulnerabilities are evaluated and scanned against your containers and hosts every hour. +sbom: + enabled: true + # Set to true to enable Container Vulnerability Management + container_image: + enabled: true + # Set to true to enable Host Vulnerability Management + host: + enabled: true + +container_image: + enabled: true diff --git a/test/regression/cases/idle_all_features/datadog-agent/security-agent.yaml b/test/regression/cases/idle_all_features/datadog-agent/security-agent.yaml new file mode 100644 index 0000000000000..d9ce27c518a1a --- /dev/null +++ b/test/regression/cases/idle_all_features/datadog-agent/security-agent.yaml @@ -0,0 +1,13 @@ +# Per https://docs.datadoghq.com/security/cloud_security_management/setup/agent/linux/ +runtime_security_config: + ## @param enabled - boolean - optional - default: false + ## Set to true to enable Threat Detection + enabled: true + +compliance_config: + ## @param enabled - boolean - optional - default: false + ## Set to true to enable CIS benchmarks for Misconfigurations. + # + enabled: true + host_benchmarks: + enabled: true diff --git a/test/regression/cases/idle_all_features/datadog-agent/system-probe.yaml b/test/regression/cases/idle_all_features/datadog-agent/system-probe.yaml new file mode 100644 index 0000000000000..a7da3c9140d50 --- /dev/null +++ b/test/regression/cases/idle_all_features/datadog-agent/system-probe.yaml @@ -0,0 +1,10 @@ +# Per https://docs.datadoghq.com/security/cloud_security_management/setup/agent/linux/ + +runtime_security_config: + ## @param enabled - boolean - optional - default: false + ## Set to true to enable Threat Detection + enabled: true + + remote_configuration: + ## @param enabled - boolean - optional - default: false + enabled: true diff --git a/test/regression/cases/idle_all_features/experiment.yaml b/test/regression/cases/idle_all_features/experiment.yaml new file mode 100644 index 0000000000000..34c27f279d298 --- /dev/null +++ b/test/regression/cases/idle_all_features/experiment.yaml @@ -0,0 +1,30 @@ +# Agent 'all features enabled' idle experiment. Represents an agent install with +# all sub-agents enabled in configuration and no active workload. 
+ +optimization_goal: memory +erratic: false + +target: + name: datadog-agent + command: /bin/entrypoint.sh + + environment: + DD_TELEMETRY_ENABLED: true + DD_API_KEY: 00000001 + DD_HOSTNAME: smp-regression + DD_DD_URL: http://127.0.0.1:9092 + + profiling_environment: + DD_INTERNAL_PROFILING_BLOCK_PROFILE_RATE: 10000 + DD_INTERNAL_PROFILING_CPU_DURATION: 1m + DD_INTERNAL_PROFILING_DELTA_PROFILES: true + DD_INTERNAL_PROFILING_ENABLED: true + DD_INTERNAL_PROFILING_ENABLE_GOROUTINE_STACKTRACES: true + DD_INTERNAL_PROFILING_MUTEX_PROFILE_FRACTION: 10 + DD_INTERNAL_PROFILING_PERIOD: 1m + DD_INTERNAL_PROFILING_UNIX_SOCKET: /var/run/datadog/apm.socket + DD_PROFILING_EXECUTION_TRACE_ENABLED: true + DD_PROFILING_EXECUTION_TRACE_PERIOD: 1m + DD_PROFILING_WAIT_PROFILE: true + + DD_INTERNAL_PROFILING_EXTRA_TAGS: experiment:idle_all_features diff --git a/test/regression/cases/idle_all_features/lading/lading.yaml b/test/regression/cases/idle_all_features/lading/lading.yaml new file mode 100644 index 0000000000000..52888afb7176d --- /dev/null +++ b/test/regression/cases/idle_all_features/lading/lading.yaml @@ -0,0 +1,177 @@ +generator: [] + +blackhole: + - http: + binding_addr: "127.0.0.1:9091" + - http: + binding_addr: "127.0.0.1:9092" + +target_metrics: + - prometheus: #core agent telemetry + uri: "http://127.0.0.1:5000/telemetry" + tags: + sub_agent: "core" + - prometheus: #process agent telemetry + uri: "http://127.0.0.1:6062/telemetry" + tags: + sub_agent: "process" + - expvar: #trace agent telemetry + uri: "http://127.0.0.1:5012/debug/vars" + vars: + - "/Event" + - "/ServiceCheck" + - "/check_run_v1" + - "/cmdline" + - "/compressor/BytesIn" + - "/compressor/BytesOut" + - "/compressor/TotalCompressCycles" + - "/compressor/TotalPayloads" + - "/connections" + - "/container" + - "/events_v2" + - "/forwarder/APIKeyFailure" + - "/forwarder/APIKeyStatus" + - "/forwarder/FileStorage/CurrentSizeInBytes" + - "/forwarder/FileStorage/DeserializeCount" + - "/forwarder/FileStorage/DeserializeErrorsCount" + - "/forwarder/FileStorage/DeserializeTransactionsCount" + - "/forwarder/FileStorage/FileSize" + - "/forwarder/FileStorage/FilesCount" + - "/forwarder/FileStorage/FilesRemovedCount" + - "/forwarder/FileStorage/PointsDroppedCount" + - "/forwarder/FileStorage/SerializeCount" + - "/forwarder/FileStorage/StartupReloadedRetryFilesCount" + - "/forwarder/RemovalPolicy/FilesFromUnknownDomainCount" + - "/forwarder/RemovalPolicy/NewRemovalPolicyCount" + - "/forwarder/RemovalPolicy/OutdatedFilesCount" + - "/forwarder/RemovalPolicy/RegisteredDomainCount" + - "/forwarder/TransactionContainer/CurrentMemSizeInBytes" + - "/forwarder/TransactionContainer/ErrorsCount" + - "/forwarder/TransactionContainer/PointsDroppedCount" + - "/forwarder/TransactionContainer/TransactionsCount" + - "/forwarder/TransactionContainer/TransactionsDroppedCount" + - "/forwarder/Transactions/Cluster" + - "/forwarder/Transactions/ClusterRole" + - "/forwarder/Transactions/ClusterRoleBinding" + - "/forwarder/Transactions/ConnectionEvents/ConnectSuccess" + - "/forwarder/Transactions/ConnectionEvents/DNSSuccess" + - "/forwarder/Transactions/CronJob" + - "/forwarder/Transactions/CustomResource" + - "/forwarder/Transactions/CustomResourceDefinition" + - "/forwarder/Transactions/DaemonSet" + - "/forwarder/Transactions/Deployment" + - "/forwarder/Transactions/Dropped" + - "/forwarder/Transactions/DroppedByEndpoint" + - "/forwarder/Transactions/ECSTask" + - "/forwarder/Transactions/Errors" + - "/forwarder/Transactions/ErrorsByType/ConnectionErrors" + - 
"/forwarder/Transactions/ErrorsByType/DNSErrors" + - "/forwarder/Transactions/ErrorsByType/SentRequestErrors" + - "/forwarder/Transactions/ErrorsByType/TLSErrors" + - "/forwarder/Transactions/ErrorsByType/WroteRequestErrors" + - "/forwarder/Transactions/HTTPErrors" + - "/forwarder/Transactions/HTTPErrorsByCode" + - "/forwarder/Transactions/HighPriorityQueueFull" + - "/forwarder/Transactions/HorizontalPodAutoscaler" + - "/forwarder/Transactions/Ingress" + - "/forwarder/Transactions/InputBytesByEndpoint" + - "/forwarder/Transactions/InputCountByEndpoint" + - "/forwarder/Transactions/Job" + - "/forwarder/Transactions/LimitRange" + - "/forwarder/Transactions/Namespace" + - "/forwarder/Transactions/NetworkPolicy" + - "/forwarder/Transactions/Node" + - "/forwarder/Transactions/OrchestratorManifest" + - "/forwarder/Transactions/PersistentVolume" + - "/forwarder/Transactions/PersistentVolumeClaim" + - "/forwarder/Transactions/Pod" + - "/forwarder/Transactions/ReplicaSet" + - "/forwarder/Transactions/Requeued" + - "/forwarder/Transactions/RequeuedByEndpoint" + - "/forwarder/Transactions/Retried" + - "/forwarder/Transactions/RetriedByEndpoint" + - "/forwarder/Transactions/RetryQueueSize" + - "/forwarder/Transactions/Role" + - "/forwarder/Transactions/RoleBinding" + - "/forwarder/Transactions/Service" + - "/forwarder/Transactions/ServiceAccount" + - "/forwarder/Transactions/StatefulSet" + - "/forwarder/Transactions/StorageClass" + - "/forwarder/Transactions/Success" + - "/forwarder/Transactions/SuccessByEndpoint/check_run_v1" + - "/forwarder/Transactions/SuccessByEndpoint/connections" + - "/forwarder/Transactions/SuccessByEndpoint/container" + - "/forwarder/Transactions/SuccessByEndpoint/events_v2" + - "/forwarder/Transactions/SuccessByEndpoint/host_metadata_v2" + - "/forwarder/Transactions/SuccessByEndpoint/intake" + - "/forwarder/Transactions/SuccessByEndpoint/orchestrator" + - "/forwarder/Transactions/SuccessByEndpoint/process" + - "/forwarder/Transactions/SuccessByEndpoint/rtcontainer" + - "/forwarder/Transactions/SuccessByEndpoint/rtprocess" + - "/forwarder/Transactions/SuccessByEndpoint/series_v1" + - "/forwarder/Transactions/SuccessByEndpoint/series_v2" + - "/forwarder/Transactions/SuccessByEndpoint/services_checks_v2" + - "/forwarder/Transactions/SuccessByEndpoint/sketches_v1" + - "/forwarder/Transactions/SuccessByEndpoint/sketches_v2" + - "/forwarder/Transactions/SuccessByEndpoint/validate_v1" + - "/forwarder/Transactions/SuccessBytesByEndpoint" + - "/forwarder/Transactions/VerticalPodAutoscaler" + - "/host_metadata_v2" + - "/hostname/errors" + - "/hostname/provider" + - "/intake" + - "/jsonstream/CompressorLocks" + - "/jsonstream/ItemDrops" + - "/jsonstream/PayloadFulls" + - "/jsonstream/TotalCalls" + - "/jsonstream/TotalItems" + - "/jsonstream/TotalLockTime" + - "/jsonstream/TotalSerializationTime" + - "/jsonstream/WriteItemErrors" + - "/kubeletQueries" + - "/orchestrator" + - "/pid" + - "/process" + - "/rtcontainer" + - "/rtprocess" + - "/serializer/SendEventsErrItemTooBigs" + - "/serializer/SendEventsErrItemTooBigsFallback" + - "/series" + - "/series_v1" + - "/series_v2" + - "/services_checks_v2" + - "/sketch_series/ItemTooBig" + - "/sketch_series/PayloadFull" + - "/sketch_series/UnexpectedItemDrops" + - "/sketches_v1" + - "/sketches_v2" + - "/splitter/NotTooBig" + - "/splitter/PayloadDrops" + - "/splitter/TooBig" + - "/splitter/TotalLoops" + - "/stats_writer/Bytes" + - "/stats_writer/ClientPayloads" + - "/stats_writer/Errors" + - "/stats_writer/Payloads" + - "/stats_writer/Retries" + - 
"/stats_writer/Splits" + - "/stats_writer/StatsBuckets" + - "/stats_writer/StatsEntries" + - "/trace_writer/Bytes" + - "/trace_writer/BytesUncompressed" + - "/trace_writer/Errors" + - "/trace_writer/Events" + - "/trace_writer/Payloads" + - "/trace_writer/Retries" + - "/trace_writer/SingleMaxSize" + - "/trace_writer/Spans" + - "/trace_writer/Traces" + - "/uptime" + - "/validate_v1" + - "/version/Version" + - "/version/GitCommit" + - "/watchdog/CPU/UserAvg" + - "/watchdog/Mem/Alloc" + tags: + sub_agent: "trace" + diff --git a/test/regression/cases/otel_to_otel_logs/datadog-agent/datadog.yaml b/test/regression/cases/otel_to_otel_logs/datadog-agent/datadog.yaml index 35593c48fbd5c..755c07c819559 100644 --- a/test/regression/cases/otel_to_otel_logs/datadog-agent/datadog.yaml +++ b/test/regression/cases/otel_to_otel_logs/datadog-agent/datadog.yaml @@ -36,4 +36,4 @@ otlp_config: traces: enabled: true debug: - loglevel: info + verbosity: normal diff --git a/test/regression/config.yaml b/test/regression/config.yaml index 9415547229ccf..4a678bef04087 100644 --- a/test/regression/config.yaml +++ b/test/regression/config.yaml @@ -1,5 +1,5 @@ lading: - version: 0.23.0 + version: 0.23.2 target: cpu_allotment: 8 diff --git a/test/required_files/agent-deb.txt b/test/required_files/agent-deb.txt new file mode 100644 index 0000000000000..5c5132de76164 --- /dev/null +++ b/test/required_files/agent-deb.txt @@ -0,0 +1,14 @@ +/lib/systemd/system/datadog-agent-security.service +/lib/systemd/system/datadog-agent-process.service +/lib/systemd/system/datadog-agent.service +/lib/systemd/system/datadog-agent-trace.service +/lib/systemd/system/datadog-agent-sysprobe.service +/etc/init.d/datadog-agent-process +/etc/init.d/datadog-agent-security +/etc/init.d/datadog-agent +/etc/init.d/datadog-agent-trace +/etc/init/datadog-agent.conf +/etc/init/datadog-agent-sysprobe.conf +/etc/init/datadog-agent-trace.conf +/etc/init/datadog-agent-process.conf +/etc/init/datadog-agent-security.conf diff --git a/test/required_files/agent-rpm.txt b/test/required_files/agent-rpm.txt new file mode 100644 index 0000000000000..bb5183bba4e15 --- /dev/null +++ b/test/required_files/agent-rpm.txt @@ -0,0 +1,10 @@ +/usr/lib/systemd/system/datadog-agent-security.service +/usr/lib/systemd/system/datadog-agent-process.service +/usr/lib/systemd/system/datadog-agent.service +/usr/lib/systemd/system/datadog-agent-trace.service +/usr/lib/systemd/system/datadog-agent-sysprobe.service +/etc/init/datadog-agent.conf +/etc/init/datadog-agent-sysprobe.conf +/etc/init/datadog-agent-trace.conf +/etc/init/datadog-agent-process.conf +/etc/init/datadog-agent-security.conf diff --git a/test/required_files/dogstatsd-deb.txt b/test/required_files/dogstatsd-deb.txt new file mode 100644 index 0000000000000..70c7c8de422d3 --- /dev/null +++ b/test/required_files/dogstatsd-deb.txt @@ -0,0 +1 @@ +/lib/systemd/system/datadog-dogstatsd.service diff --git a/test/required_files/dogstatsd-rpm.txt b/test/required_files/dogstatsd-rpm.txt new file mode 100644 index 0000000000000..70c7c8de422d3 --- /dev/null +++ b/test/required_files/dogstatsd-rpm.txt @@ -0,0 +1 @@ +/lib/systemd/system/datadog-dogstatsd.service diff --git a/test/required_files/iot-agent-deb.txt b/test/required_files/iot-agent-deb.txt new file mode 100644 index 0000000000000..864131156691d --- /dev/null +++ b/test/required_files/iot-agent-deb.txt @@ -0,0 +1 @@ +/lib/systemd/system/datadog-agent.service diff --git a/test/required_files/iot-agent-rpm.txt b/test/required_files/iot-agent-rpm.txt new file mode 100644 
index 0000000000000..b53035e68fdf6 --- /dev/null +++ b/test/required_files/iot-agent-rpm.txt @@ -0,0 +1 @@ +/usr/lib/systemd/system/datadog-agent.service diff --git a/tools/NamedPipeCmd/main.go b/tools/NamedPipeCmd/main.go new file mode 100644 index 0000000000000..05cf78369470e --- /dev/null +++ b/tools/NamedPipeCmd/main.go @@ -0,0 +1,142 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build windows + +// Package namedpipecmd is the entrypoint for the NamedPipeCmd tool +package namedpipecmd + +import ( + "context" + "flag" + "fmt" + "io" + "net" + "net/http" + "os" + "time" + + "github.com/DataDog/datadog-agent/pkg/api/util" + winio "github.com/Microsoft/go-winio" +) + +var ( + quiet = flag.Bool("quiet", false, "Only return the exit code on failure, or the JSON output on success") +) + +func exitWithError(err error) { + fmt.Printf("\nError: %s\n", err.Error()) + os.Exit(1) +} + +func exitWithErrorCode(err int) { + fmt.Printf("Error: %d\n", err) + os.Exit(err) +} + +func fprintf(format string, a ...interface{}) { + if !*quiet { + fmt.Printf(format, a...) + } +} + +func main() { + + method := flag.String("method", "", "GET or POST") + path := flag.String("path", "", "URI path") + //payload := flag.String(payload, "", "POST payload") + flag.Parse() + + // This should match SystemProbeProductionPipeName in + // "github.com/DataDog/datadog-agent/pkg/process/net" + pipePath := `\\.\pipe\dd_system_probe` + fmt.Printf("Connecting to named pipe %s ... ", pipePath) + + // The Go wrapper for named pipes does not expose buffer size for the client. + // It seems the client named pipe is fixed with 4K bytes for its buffer. + pipeClient, err := winio.DialPipe(pipePath, nil) + if err != nil { + exitWithError(err) + return + } + + fprintf("connected") + + defer func() { + fprintf("\nClosing named pipe...\n") + pipeClient.Close() + + }() + + // The HTTP client still needs the URL as part of the request even though + // the underlying transport is a named pipe. + method := os.Args[1] + uriPath := os.Args[2] + url := "http://localhost" + uriPath + + if (*method != "GET") && (*method != "POST") { + fprintf("Invalid HTTP method: %s\n", *method) + os.Exit(1) + } + + // This HTTP client handles formatting and chunked messages. + fmt.Printf("Creating HTTP client. ") + httpClient := http.Client{ + Timeout: 10 * time.Second, + Transport: &http.Transport{ + MaxIdleConns: 2, + IdleConnTimeout: 30 * time.Second, + DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { + return pipeClient, nil + }, + TLSHandshakeTimeout: 1 * time.Second, + ResponseHeaderTimeout: 5 * time.Second, + ExpectContinueTimeout: 50 * time.Millisecond, + }, + } + + fprintf("Setting up options. ") + + options := &util.ReqOptions{Conn: 1} + if options.Authtoken == "" { + options.Authtoken = util.GetAuthToken() + } + + if options.Ctx == nil { + options.Ctx = context.Background() + } + + fprintf("Creating request. \n") + + req, err := http.NewRequestWithContext(options.Ctx, *method, url, nil) + if err != nil { + exitWithError(err) + } + + // Set required headers. 
+ req.Header.Set("User-Agent", "namedpipecmd/1.1") + req.Header.Set("Accept-Encoding", "gzip") + if options.Conn == 1 { + req.Close = true + } + + fprintf("Sending HTTP request...\n") + + result, err := httpClient.Do(req) + if err != nil { + exitWithError(err) + } + + body, err := io.ReadAll(result.Body) + result.Body.Close() + if err != nil { + exitWithError(err) + } + + fprintf("Received %d bytes. Status %d\n", len(body), result.StatusCode) + + fmt.Printf("\n---------------------------------------\n\n") + fmt.Printf("%s\n", body) +} diff --git a/tools/agent_QA/ddqa_template_config.toml b/tools/agent_QA/ddqa_template_config.toml new file mode 100644 index 0000000000000..dfba3ea83d661 --- /dev/null +++ b/tools/agent_QA/ddqa_template_config.toml @@ -0,0 +1,11 @@ +global_config_source = "aHR0cHM6Ly9yYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tL0RhdGFEb2cvZ2l0aHViLW1ldGFkYXRhL21hc3Rlci9qaXJhLnRvbWw=" +repo = "datadog-agent" +cache_dir = "" +jira_statuses = [ + "TODO", + "Testing", + "Done", +] +ignored_labels = [ + "qa/skip-qa", +] diff --git a/tools/ci/aws_ssm_get_wrapper.ps1 b/tools/ci/aws_ssm_get_wrapper.ps1 deleted file mode 100644 index 158301f4a08c6..0000000000000 --- a/tools/ci/aws_ssm_get_wrapper.ps1 +++ /dev/null @@ -1,23 +0,0 @@ -param ( - [string]$parameterName -) - -$retryCount = 0 -$maxRetries = 10 - -while ($retryCount -lt $maxRetries) { - $result = (aws ssm get-parameter --region us-east-1 --name $parameterName --with-decryption --query "Parameter.Value" --output text 2> awsErrorFile.txt) - $error = Get-Content awsErrorFile.txt - if ($result) { - $result - break - } - if ($error -match "Unable to locate credentials") { - # See 5th row in https://docs.google.com/spreadsheets/d/1JvdN0N-RdNEeOJKmW_ByjBsr726E3ZocCKU8QoYchAc - Write-Error "Permanent error: unable to locate AWS credentials, not retrying" - exit 1 - } - - $retryCount++ - Start-Sleep -Seconds ([math]::Pow(2, $retryCount)) -} diff --git a/tools/ci/docker-login.ps1 b/tools/ci/docker-login.ps1 index c0d4194bafa5a..840b6b786492d 100644 --- a/tools/ci/docker-login.ps1 +++ b/tools/ci/docker-login.ps1 @@ -6,8 +6,18 @@ If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } # DockerHub login -$DOCKER_REGISTRY_LOGIN = $(& "C:\mnt\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:DOCKER_REGISTRY_LOGIN_SSM_KEY") -$DOCKER_REGISTRY_PWD = $(& "C:\mnt\tools\ci\aws_ssm_get_wrapper.ps1" "$Env:DOCKER_REGISTRY_PWD_SSM_KEY") +$tmpfile = [System.IO.Path]::GetTempFileName() +& "C:\mnt\tools\ci\fetch_secret.ps1" -parameterName "$Env:DOCKER_REGISTRY_LOGIN" -tempFile "$tmpfile" +If ($lastExitCode -ne "0") { + throw "Previous command returned $lastExitCode" +} +$DOCKER_REGISTRY_LOGIN = $(cat "$tmpfile") +& "C:\mnt\tools\ci\fetch_secret.ps1" -parameterName "$Env:DOCKER_REGISTRY_PWD" -tempFile "$tmpfile" +If ($lastExitCode -ne "0") { + throw "Previous command returned $lastExitCode" +} +$DOCKER_REGISTRY_PWD = $(cat "$tmpfile") +Remove-Item "$tmpfile" docker login --username "${DOCKER_REGISTRY_LOGIN}" --password "${DOCKER_REGISTRY_PWD}" "docker.io" If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" diff --git a/tools/ci/fetch_secret.ps1 b/tools/ci/fetch_secret.ps1 new file mode 100644 index 0000000000000..10ea137405d74 --- /dev/null +++ b/tools/ci/fetch_secret.ps1 @@ -0,0 +1,35 @@ +param ( + [string]$parameterName, + [string]$parameterField, + [string]$tempFile +) + +$retryCount = 0 +$maxRetries = 10 + +# To catch the error message from aws cli +$ErrorActionPreference = "Continue" + +while ($retryCount -lt $maxRetries) { 
+ if ($parameterField) { + $result = (vault kv get -field="$parameterField" kv/k8s/gitlab-runner/datadog-agent/"$parameterName" 2> errorFile.txt) + } else { + $result = (aws ssm get-parameter --region us-east-1 --name $parameterName --with-decryption --query "Parameter.Value" --output text 2> errorFile.txt) + } + $error = Get-Content errorFile.txt + if ($result) { + "$result" | Out-File -FilePath "$tempFile" -Encoding ASCII + exit 0 + } + if ($error -match "Unable to locate credentials") { + # See 5th row in https://docs.google.com/spreadsheets/d/1JvdN0N-RdNEeOJKmW_ByjBsr726E3ZocCKU8QoYchAc + Write-Error "Permanent error: unable to locate credentials, not retrying" + exit 42 + } + + $retryCount++ + Start-Sleep -Seconds ([math]::Pow(2, $retryCount)) +} + +Write-Error "Failed to retrieve $parameterName after $maxRetries retries" +exit 1 diff --git a/tools/ci/aws_ssm_get_wrapper.sh b/tools/ci/fetch_secret.sh similarity index 51% rename from tools/ci/aws_ssm_get_wrapper.sh rename to tools/ci/fetch_secret.sh index 2fb8298145d9b..dadbe93867331 100755 --- a/tools/ci/aws_ssm_get_wrapper.sh +++ b/tools/ci/fetch_secret.sh @@ -3,23 +3,25 @@ retry_count=0 max_retries=10 parameter_name="$1" - -# shellcheck disable=SC1091 -source /root/.bashrc > /dev/null 2>&1 +parameter_field="$2" set +x while [[ $retry_count -lt $max_retries ]]; do - result=$(aws ssm get-parameter --region us-east-1 --name "$parameter_name" --with-decryption --query "Parameter.Value" --output text 2> awsErrorFile) - error=$( errorFile) + else + result=$(aws ssm get-parameter --region us-east-1 --name "$parameter_name" --with-decryption --query "Parameter.Value" --output text 2> errorFile) + fi + error=$(&2 echo "Permanent error: unable to locate AWS credentials, not retrying" - exit 1 + >&2 echo "Permanent error: unable to locate credentials, not retrying" + exit 42 fi retry_count=$((retry_count+1)) sleep $((2**retry_count)) diff --git a/tools/ci/junit_upload.sh b/tools/ci/junit_upload.sh index 8d13895f0e912..32c4621acf154 100755 --- a/tools/ci/junit_upload.sh +++ b/tools/ci/junit_upload.sh @@ -1,14 +1,13 @@ #!/bin/bash # shellcheck source=/dev/null -source /root/.bashrc # junit file name can differ in kitchen or macos context junit_files="junit-*.tgz" if [[ -n "$1" ]]; then junit_files="$1" fi -GITLAB_TOKEN="$("$CI_PROJECT_DIR"/tools/ci/aws_ssm_get_wrapper.sh "$GITLAB_READ_API_TOKEN_SSM_NAME")" -DATADOG_API_KEY="$("$CI_PROJECT_DIR"/tools/ci/aws_ssm_get_wrapper.sh "$API_KEY_ORG2_SSM_NAME")" +GITLAB_TOKEN="$("$CI_PROJECT_DIR"/tools/ci/fetch_secret.sh "$GITLAB_READ_API_TOKEN")" +DATADOG_API_KEY="$("$CI_PROJECT_DIR"/tools/ci/fetch_secret.sh "$API_KEY_ORG2")" export DATADOG_API_KEY export GITLAB_TOKEN error=0 @@ -19,6 +18,7 @@ for file in $junit_files; do fi inv -e junit-upload --tgz-path "$file" || error=1 done +unset DATADOG_API_KEY GITLAB_TOKEN # Never fail on Junit upload failure since it would prevent the other after scripts to run. 
if [ $error -eq 1 ]; then echo "Error: Junit upload failed" diff --git a/tools/gdb/Dockerfile b/tools/gdb/Dockerfile index 4adfa70473a9a..5dfd0b06eda82 100644 --- a/tools/gdb/Dockerfile +++ b/tools/gdb/Dockerfile @@ -6,7 +6,7 @@ RUN rm -vf /etc/ssl/openssl.cnf RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get install -y gdb build-essential strace less vim # Install go -RUN curl -fSL -o golang.tgz https://go.dev/dl/go1.22.6.linux-amd64.tar.gz +RUN curl -fSL -o golang.tgz https://go.dev/dl/go1.22.7.linux-amd64.tar.gz RUN tar xzvf golang.tgz RUN ln -s /go /goroot diff --git a/tools/windows/DatadogAgentInstallScript/Install-Datadog.ps1 b/tools/windows/DatadogAgentInstallScript/Install-Datadog.ps1 new file mode 100644 index 0000000000000..4e93448f2eac2 --- /dev/null +++ b/tools/windows/DatadogAgentInstallScript/Install-Datadog.ps1 @@ -0,0 +1,238 @@ +<# + .SYNOPSIS + Downloads and installs Datadog on the machine. +#> +[CmdletBinding(DefaultParameterSetName = 'Default')] +$SCRIPT_VERSION = "1.0.0" +$GENERAL_ERROR_CODE = 1 + +# ExitCodeException can be used to report failures from executables that set $LASTEXITCODE +class ExitCodeException : Exception { + [string] $LastExitCode + + ExitCodeException($message, $lastExitCode) : base($message) { + $this.LastExitCode = $lastExitCode + } +} + +function Update-ConfigFile($regex, $replacement) { + $configFile = Join-Path (Get-ItemPropertyValue -Path "HKLM:\\SOFTWARE\\Datadog\\Datadog Agent" -Name "ConfigRoot") "datadog.yaml" + if (-Not $configFile) { + $configFile = "C:\\ProgramData\\Datadog\\datadog.yaml" + } + if (-Not (Test-Path $configFile)) { + throw "datadog.yaml doesn't exist" + } + if (((Get-Content $configFile) | Select-String $regex | Measure-Object).Count -eq 0) { + Add-Content -Path $configFile -Value $replacement + } + else { + (Get-Content $configFile) -replace $regex, $replacement | Out-File $configFile + } +} + +function Send-Telemetry($payload) { + $telemetryUrl = "https://instrumentation-telemetry-intake.datadoghq.com/api/v2/apmtelemetry" + if ($env:DD_SITE -eq "ddog-gov.com" -or -Not ($env:DD_API_KEY)) { + return + } + + if ($env:DD_SITE) { + $telemetryUrl = "https://instrumentation-telemetry-intake.$env:DD_SITE/api/v2/apmtelemetry" + } + $requestHeaders = @{ + "DD-Api-Key" = $env:DD_API_KEY + "Content-Type" = "application/json" + } + $result = Invoke-WebRequest -Uri $telemetryUrl -Method POST -Body $payload -Headers $requestHeaders + Write-Host "Sending telemetry: $($result.StatusCode)" +} + +function Show-Error($errorMessage, $errorCode) { + Write-Error -ErrorAction Continue @" + Datadog Install script failed: + + Error message: $($errorMessage) + Error code: $($errorCode) + +"@ + + $agentVersion = "7.x" + if ($env:DD_AGENT_MINOR_VERSION) { + $agentVersion = "7.$env:DD_AGENT_MINOR_VERSION" + } + $errorMessage = ($errorMessage -replace '"', '_' -replace '\n', ' ' -replace '\r', ' ') + + Send-Telemetry @" +{ + "request_type": "apm-onboarding-event", + "api_version": "v1", + "payload": { + "event_name": "agent.installation.error", + "tags": { + "install_id": "$(New-Guid)", + "install_type": "windows_powershell", + "install_time": "$([DateTimeOffset]::Now.ToUnixTimeSeconds())" + "agent_platform": "windows", + "agent_version: "$($agentVersion)", + "script_version": "$($SCRIPT_VERSION)" + }, + "error": { + "code": "$($errorCode)", + "message": "$($errorMessage)" + } + } +} +"@ +} + +function Start-ProcessWithOutput { + param ([string]$Path, [string[]]$ArgumentList) + $psi = New-object System.Diagnostics.ProcessStartInfo + 
+    $psi.CreateNoWindow = $true
+    $psi.UseShellExecute = $false
+    $psi.RedirectStandardOutput = $true
+    $psi.RedirectStandardError = $true
+    $psi.FileName = $Path
+    if ($ArgumentList.Count -gt 0) {
+        $psi.Arguments = $ArgumentList
+    }
+    $process = New-Object System.Diagnostics.Process
+    $process.StartInfo = $psi
+    $stdout = Register-ObjectEvent -InputObject $process -EventName 'OutputDataReceived' `
+        -Action {
+            if (![String]::IsNullOrEmpty($EventArgs.Data)) {
+                Write-Host $EventArgs.Data
+            }
+        }
+    $stderr = Register-ObjectEvent -InputObject $process -EventName 'ErrorDataReceived' `
+        -Action {
+            if (![String]::IsNullOrEmpty($EventArgs.Data)) {
+                # Print stderr from process into host stderr
+                # Unfortunately that means this output cannot be captured from within PowerShell
+                # and it won't work within PowerShell ISE because it is not a console host.
+                [Console]::ForegroundColor = 'red'
+                [Console]::Error.WriteLine($EventArgs.Data)
+                [Console]::ResetColor()
+            }
+        }
+    [void]$process.Start()
+    $process.BeginOutputReadLine()
+    $process.BeginErrorReadLine()
+    $process.WaitForExit()
+    Unregister-Event -SourceIdentifier $stdout.Name
+    Unregister-Event -SourceIdentifier $stderr.Name
+    return $process.ExitCode
+}
+
+# Set some defaults if not provided
+$ddInstallerUrl = $env:DD_INSTALLER_URL
+if (-Not $ddInstallerUrl) {
+    $ddInstallerUrl = "https://s3.amazonaws.com/dd-agent-mstesting/datadog-installer-x86_64.exe"
+}
+
+$ddRemoteUpdates = $env:DD_REMOTE_UPDATES
+if (-Not $ddRemoteUpdates) {
+    $ddRemoteUpdates = "false"
+}
+
+try {
+    Write-Host "Welcome to the Datadog Install Script"
+    if (-not [Environment]::Is64BitProcess) {
+        throw "This command must be run in a 64-bit environment."
+    }
+
+    $myWindowsID = [System.Security.Principal.WindowsIdentity]::GetCurrent()
+    $myWindowsPrincipal = new-object System.Security.Principal.WindowsPrincipal($myWindowsID)
+    $adminRole = [System.Security.Principal.WindowsBuiltInRole]::Administrator
+    if ($myWindowsPrincipal.IsInRole($adminRole)) {
+        # We are running "as Administrator"
+        $Host.UI.RawUI.WindowTitle = $myInvocation.MyCommand.Definition + "(Elevated)"
+    }
+    else {
+        # We are not running "as Administrator"
+        $newProcess = new-object System.Diagnostics.ProcessStartInfo "PowerShell";
+        $newProcess.Arguments = $myInvocation.MyCommand.Definition;
+        $newProcess.Verb = "runas";
+        $proc = [System.Diagnostics.Process]::Start($newProcess);
+        $proc.WaitForExit()
+        return $proc.ExitCode
+    }
+
+    # PowerShell does not enable TLS 1.2 by default, and we want it enabled for faster downloads
+    Write-Host "Forcing web requests to TLS v1.2"
+    [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor [System.Net.SecurityProtocolType]::Tls12
+
+    $installer = Join-Path -Path ([System.IO.Path]::GetTempPath()) -ChildPath "datadog-installer-x86_64.exe"
+    if (Test-Path $installer) {
+        Remove-Item -Force $installer
+    }
+
+    Write-Host "Downloading installer from $ddInstallerUrl"
+    [System.Net.WebClient]::new().DownloadFile($ddInstallerUrl, $installer)
+
+    # If not set, the `default-packages` won't contain the Datadog Agent
+    $env:DD_INSTALLER_DEFAULT_PKG_INSTALL_DATADOG_AGENT = "True"
+
+    Write-Host "Starting bootstrap process"
+    $result = Start-ProcessWithOutput -Path $installer -ArgumentList "bootstrap"
+    if ($result -ne 0) {
+        # bootstrap only fails if it fails to install the Datadog Installer, so it's possible the Agent was not installed
+        throw [ExitCodeException]::new("Bootstrap failed", $result)
+    }
+    Write-Host "Bootstrap execution done"
+
+    if (-Not (Test-Path "HKLM:\\SOFTWARE\\Datadog\\Datadog Agent")) {
+        throw "Agent is not installed"
+    }
+
+    if ($env:DD_API_KEY) {
+        Write-Host "Writing DD_API_KEY"
+        Update-ConfigFile "^[ #]*api_key:.*" "api_key: $env:DD_API_KEY"
+    }
+
+    if ($env:DD_SITE) {
+        Write-Host "Writing DD_SITE"
+        Update-ConfigFile "^[ #]*site:.*" "site: $env:DD_SITE"
+    }
+
+    if ($env:DD_URL) {
+        Write-Host "Writing DD_URL"
+        Update-ConfigFile "^[ #]*dd_url:.*" "dd_url: $env:DD_URL"
+    }
+
+    if ($ddRemoteUpdates) {
+        Write-Host "Writing DD_REMOTE_UPDATES"
+        Update-ConfigFile "^[ #]*remote_updates:.*" "remote_updates: $($ddRemoteUpdates.ToLower())"
+    }
+
+    Send-Telemetry @"
+{
+    "request_type": "apm-onboarding-event",
+    "api_version": "v1",
+    "payload": {
+        "event_name": "agent.installation.success",
+        "tags": {
+            "install_id": "$(New-Guid)",
+            "install_type": "windows_powershell",
+            "install_time": "$([DateTimeOffset]::Now.ToUnixTimeSeconds())",
+            "agent_platform": "windows",
+            "agent_version": "$($agentVersion)",
+            "script_version": "$($SCRIPT_VERSION)"
+        }
+    }
+}
+"@
+
+}
+catch [ExitCodeException] {
+    Show-Error $_.Exception.Message $_.Exception.LastExitCode
+}
+catch {
+    Show-Error $_.Exception.Message $GENERAL_ERROR_CODE
+}
+finally {
+    Write-Host "Cleaning up..."
+    Remove-Item -Force -EA SilentlyContinue $installer
+}
+Write-Host "Datadog Install Script finished!"
diff --git a/tools/windows/DatadogAgentInstaller/CustomActions/InstallStateCustomActions.cs b/tools/windows/DatadogAgentInstaller/CustomActions/InstallStateCustomActions.cs
index 34cce2d6b636d..f86da9332b282 100644
--- a/tools/windows/DatadogAgentInstaller/CustomActions/InstallStateCustomActions.cs
+++ b/tools/windows/DatadogAgentInstaller/CustomActions/InstallStateCustomActions.cs
@@ -3,6 +3,7 @@
 using Datadog.CustomActions.Native;
 using Microsoft.Win32;
 using System;
+using System.Security.Cryptography;
 using System.Security.Principal;
 using ServiceController = Datadog.CustomActions.Native.ServiceController;
 
@@ -177,6 +178,7 @@ public void GetWindowsBuildVersion()
            {
                _session.Log("WindowsBuild not found");
            }
+           _session.Log("FIPS enabled: " + CryptoConfig.AllowOnlyFipsAlgorithms);
        }
 
        public static SecurityIdentifier GetPreviousAgentUser(ISession session, IRegistryServices registryServices,
diff --git a/tools/windows/DatadogAgentInstaller/CustomActions/ServiceCustomAction.cs b/tools/windows/DatadogAgentInstaller/CustomActions/ServiceCustomAction.cs
index 51fe43565a7d2..06fde0b96bc1d 100644
--- a/tools/windows/DatadogAgentInstaller/CustomActions/ServiceCustomAction.cs
+++ b/tools/windows/DatadogAgentInstaller/CustomActions/ServiceCustomAction.cs
@@ -413,7 +413,7 @@ private ActionResult StartDDServices()
            }
            catch (Exception e)
            {
-               _session.Log($"Failed to stop services: {e}");
+               _session.Log($"Failed to start services: {e}");
                // Allow service start to fail and continue the install
            }
            return ActionResult.Success;
diff --git a/tools/windows/DatadogAgentInstaller/WixSetup/Datadog Agent/AgentBinaries.cs b/tools/windows/DatadogAgentInstaller/WixSetup/Datadog Agent/AgentBinaries.cs
index 21ebe227b750e..5db25f761e0ef 100644
--- a/tools/windows/DatadogAgentInstaller/WixSetup/Datadog Agent/AgentBinaries.cs
+++ b/tools/windows/DatadogAgentInstaller/WixSetup/Datadog Agent/AgentBinaries.cs
@@ -28,7 +28,7 @@ public AgentBinaries(string binSource, string installerSource)
        {
            $@"{installerSource}\embedded3\python.exe",
            $@"{installerSource}\embedded3\python3.dll",
-           $@"{installerSource}\embedded3\python311.dll",
$@"{installerSource}\embedded3\python312.dll", $@"{installerSource}\embedded3\pythonw.exe" }; PythonTwoBinaries = new[]